From e7e71a03c35316cfc4279f19bcaaca73fea339d0 Mon Sep 17 00:00:00 2001
From: Todd Malsbary
Date: Mon, 24 Jan 2022 14:42:29 -0800
Subject: [PATCH] Remove BPA operator

Signed-off-by: Todd Malsbary
Change-Id: Id7c9306378dd0fd252775bde8933251843bdc204
---
 cmd/bpa-operator/.gitignore | 77 - cmd/bpa-operator/Makefile | 67 - cmd/bpa-operator/bpa_operator_launch.sh | 22 - cmd/bpa-operator/build/Dockerfile | 8 - cmd/bpa-operator/build/bin/entrypoint | 12 - cmd/bpa-operator/build/bin/user_setup | 13 - cmd/bpa-operator/cmd/manager/main.go | 162 - .../bpa_v1alpha1_provisioning_cr.yaml | 16 - .../bpa_v1alpha1_provisioning_cr_2.yaml | 16 - .../bpa_v1alpha1_provisioning_cr_multiple_3.yaml | 18 - ...v1alpha1_provisioning_cr_sample_multiple_1.yaml | 14 - ...v1alpha1_provisioning_cr_sample_multiple_2.yaml | 18 - ...bpa_v1alpha1_provisioning_cr_sample_single.yaml | 11 - ...a_v1alpha1_provisioning_cr_sample_single_2.yaml | 14 - ...a_v1alpha1_provisioning_cr_sample_single_3.yaml | 13 - .../bpa_v1alpha1_provisioning_cr_vm_multiple.yaml | 15 - .../bpa_v1alpha1_provisioning_cr_vm_single.yaml | 12 - .../bpa_v1alpha1_provisioning_crd.yaml | 40 - .../provisioning_cr_kud_plugins_sample.yaml | 13 - .../software-crd/bpa_v1alpha1_software_cr.yaml | 19 - .../software-crd/bpa_v1alpha1_software_crd.yaml | 40 - .../test_bpa_v1alpha1_software_cr.yaml | 19 - cmd/bpa-operator/deploy/kud-installer.yaml | 28 - .../deploy/netattachdef-flannel-vm.yaml | 20 - cmd/bpa-operator/deploy/operator.yaml | 43 - cmd/bpa-operator/deploy/role.yaml | 74 - cmd/bpa-operator/deploy/role_binding.yaml | 12 - cmd/bpa-operator/deploy/service_account.yaml | 4 - .../deploy/virtlet-deployment-sample.yaml | 74 - cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh | 193 - .../e2etest/bpa_remote_compute_cr_sample.yaml | 14 - .../e2etest/bpa_remote_virtletvm_cr.yaml | 13 - .../e2etest/bpa_remote_virtletvm_cr_multi.yaml | 22 - .../e2etest/bpa_remote_virtletvm_verifier.sh | 77 - .../e2e_virtletvm_test_provisioning_cr.yaml | 12 - .../e2etest/test_bmh_provisioning_cr.yaml | 16 - cmd/bpa-operator/go.mod | 45 - cmd/bpa-operator/go.sum | 1237 - .../pkg/apis/addtoscheme_bpa_v1alpha1.go | 10 - cmd/bpa-operator/pkg/apis/apis.go | 13 - cmd/bpa-operator/pkg/apis/bpa/group.go | 6 - cmd/bpa-operator/pkg/apis/bpa/v1alpha1/doc.go | 4 - .../pkg/apis/bpa/v1alpha1/provisioning_types.go | 70 - cmd/bpa-operator/pkg/apis/bpa/v1alpha1/register.go | 19 - .../pkg/apis/bpa/v1alpha1/software_types.go | 53 - .../pkg/apis/bpa/v1alpha1/zz_generated.deepcopy.go | 253 - .../pkg/apis/bpa/v1alpha1/zz_generated.openapi.go | 85 - .../pkg/controller/add_provisioning.go | 10 - cmd/bpa-operator/pkg/controller/controller.go | 18 - .../provisioning/provisioning_controller.go | 864 - .../provisioning/provisioning_controller_test.go | 205 - cmd/bpa-operator/tools.go | 7 - .../vendor/cloud.google.com/go/AUTHORS | 15 - .../vendor/cloud.google.com/go/CONTRIBUTORS | 40 - .../vendor/cloud.google.com/go/LICENSE | 202 - .../go/compute/metadata/metadata.go | 513 - .../exporter/ocagent/.travis.yml | 18 - .../exporter/ocagent/CONTRIBUTING.md | 24 - .../exporter/ocagent/LICENSE | 201 - .../exporter/ocagent/README.md | 61 - .../exporter/ocagent/common.go | 38 - .../exporter/ocagent/connection.go | 97 - .../exporter/ocagent/go.mod | 12 - .../exporter/ocagent/go.sum | 152 - .../exporter/ocagent/nodeinfo.go | 46 - .../exporter/ocagent/ocagent.go | 496 - .../exporter/ocagent/options.go | 128 - .../exporter/ocagent/transform_spans.go | 248 - .../exporter/ocagent/transform_stats_to_metrics.go | 274 - 
.../exporter/ocagent/version.go | 17 - .../vendor/github.com/Azure/go-autorest/LICENSE | 191 - .../Azure/go-autorest/autorest/adal/README.md | 292 - .../Azure/go-autorest/autorest/adal/config.go | 91 - .../Azure/go-autorest/autorest/adal/devicetoken.go | 242 - .../Azure/go-autorest/autorest/adal/persist.go | 73 - .../Azure/go-autorest/autorest/adal/sender.go | 60 - .../Azure/go-autorest/autorest/adal/token.go | 985 - .../Azure/go-autorest/autorest/adal/version.go | 45 - .../Azure/go-autorest/autorest/authorization.go | 287 - .../Azure/go-autorest/autorest/autorest.go | 150 - .../Azure/go-autorest/autorest/azure/async.go | 992 - .../Azure/go-autorest/autorest/azure/azure.go | 326 - .../go-autorest/autorest/azure/environments.go | 196 - .../autorest/azure/metadata_environment.go | 245 - .../Azure/go-autorest/autorest/azure/rp.go | 200 - .../Azure/go-autorest/autorest/client.go | 276 - .../Azure/go-autorest/autorest/date/date.go | 96 - .../Azure/go-autorest/autorest/date/time.go | 103 - .../Azure/go-autorest/autorest/date/timerfc1123.go | 100 - .../Azure/go-autorest/autorest/date/unixtime.go | 123 - .../Azure/go-autorest/autorest/date/utility.go | 25 - .../github.com/Azure/go-autorest/autorest/error.go | 98 - .../Azure/go-autorest/autorest/preparer.go | 480 - .../Azure/go-autorest/autorest/responder.go | 250 - .../Azure/go-autorest/autorest/retriablerequest.go | 52 - .../go-autorest/autorest/retriablerequest_1.7.go | 54 - .../go-autorest/autorest/retriablerequest_1.8.go | 66 - .../Azure/go-autorest/autorest/sender.go | 326 - .../Azure/go-autorest/autorest/utility.go | 228 - .../Azure/go-autorest/autorest/version.go | 41 - .../github.com/Azure/go-autorest/logger/logger.go | 328 - .../Azure/go-autorest/tracing/tracing.go | 190 - .../github.com/PuerkitoBio/purell/.gitignore | 5 - .../github.com/PuerkitoBio/purell/.travis.yml | 12 - .../vendor/github.com/PuerkitoBio/purell/LICENSE | 12 - .../vendor/github.com/PuerkitoBio/purell/README.md | 188 - .../vendor/github.com/PuerkitoBio/purell/purell.go | 379 - .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 - .../vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 - .../vendor/github.com/PuerkitoBio/urlesc/README.md | 16 - .../vendor/github.com/PuerkitoBio/urlesc/urlesc.go | 180 - .../github.com/appscode/jsonpatch/.gitignore | 26 - .../github.com/appscode/jsonpatch/.travis.yml | 10 - .../vendor/github.com/appscode/jsonpatch/LICENSE | 202 - .../vendor/github.com/appscode/jsonpatch/README.md | 53 - .../vendor/github.com/appscode/jsonpatch/go.mod | 8 - .../vendor/github.com/appscode/jsonpatch/go.sum | 8 - .../github.com/appscode/jsonpatch/jsonpatch.go | 336 - .../vendor/github.com/beorn7/perks/LICENSE | 20 - .../beorn7/perks/quantile/exampledata.txt | 2388 - .../github.com/beorn7/perks/quantile/stream.go | 316 - .../opencensus-proto/AUTHORS | 1 - .../opencensus-proto/LICENSE | 202 - .../gen-go/agent/common/v1/common.pb.go | 356 - .../gen-go/agent/metrics/v1/metrics_service.pb.go | 264 - .../gen-go/agent/trace/v1/trace_service.pb.go | 443 - .../gen-go/agent/trace/v1/trace_service.pb.gw.go | 154 - .../gen-go/metrics/v1/metrics.pb.go | 1126 - .../gen-go/resource/v1/resource.pb.go | 99 - .../opencensus-proto/gen-go/trace/v1/trace.pb.go | 1543 - .../gen-go/trace/v1/trace_config.pb.go | 358 - .../github.com/coreos/prometheus-operator/LICENSE | 202 - .../github.com/coreos/prometheus-operator/NOTICE | 5 - .../pkg/apis/monitoring/register.go | 19 - .../pkg/apis/monitoring/v1/crd_kinds.go | 81 - .../pkg/apis/monitoring/v1/doc.go | 18 - 
.../pkg/apis/monitoring/v1/openapi_generated.go | 13966 ----- .../pkg/apis/monitoring/v1/register.go | 61 - .../pkg/apis/monitoring/v1/types.go | 872 - .../apis/monitoring/v1/zz_generated.deepcopy.go | 1193 - .../pkg/client/versioned/scheme/doc.go | 18 - .../pkg/client/versioned/scheme/register.go | 54 - .../versioned/typed/monitoring/v1/alertmanager.go | 189 - .../client/versioned/typed/monitoring/v1/doc.go | 18 - .../typed/monitoring/v1/generated_expansion.go | 25 - .../typed/monitoring/v1/monitoring_client.go | 103 - .../versioned/typed/monitoring/v1/prometheus.go | 189 - .../typed/monitoring/v1/prometheusrule.go | 172 - .../typed/monitoring/v1/servicemonitor.go | 172 - .../vendor/github.com/davecgh/go-spew/LICENSE | 15 - .../github.com/davecgh/go-spew/spew/bypass.go | 145 - .../github.com/davecgh/go-spew/spew/bypasssafe.go | 38 - .../github.com/davecgh/go-spew/spew/common.go | 341 - .../github.com/davecgh/go-spew/spew/config.go | 306 - .../vendor/github.com/davecgh/go-spew/spew/doc.go | 211 - .../vendor/github.com/davecgh/go-spew/spew/dump.go | 509 - .../github.com/davecgh/go-spew/spew/format.go | 419 - .../vendor/github.com/davecgh/go-spew/spew/spew.go | 148 - .../vendor/github.com/dgrijalva/jwt-go/.gitignore | 4 - .../vendor/github.com/dgrijalva/jwt-go/.travis.yml | 13 - .../vendor/github.com/dgrijalva/jwt-go/LICENSE | 8 - .../github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md | 97 - .../vendor/github.com/dgrijalva/jwt-go/README.md | 100 - .../github.com/dgrijalva/jwt-go/VERSION_HISTORY.md | 118 - .../vendor/github.com/dgrijalva/jwt-go/claims.go | 134 - .../vendor/github.com/dgrijalva/jwt-go/doc.go | 4 - .../vendor/github.com/dgrijalva/jwt-go/ecdsa.go | 148 - .../github.com/dgrijalva/jwt-go/ecdsa_utils.go | 67 - .../vendor/github.com/dgrijalva/jwt-go/errors.go | 59 - .../vendor/github.com/dgrijalva/jwt-go/hmac.go | 95 - .../github.com/dgrijalva/jwt-go/map_claims.go | 94 - .../vendor/github.com/dgrijalva/jwt-go/none.go | 52 - .../vendor/github.com/dgrijalva/jwt-go/parser.go | 148 - .../vendor/github.com/dgrijalva/jwt-go/rsa.go | 101 - .../vendor/github.com/dgrijalva/jwt-go/rsa_pss.go | 126 - .../github.com/dgrijalva/jwt-go/rsa_utils.go | 101 - .../github.com/dgrijalva/jwt-go/signing_method.go | 35 - .../vendor/github.com/dgrijalva/jwt-go/token.go | 108 - .../github.com/emicklei/go-restful/.gitignore | 70 - .../github.com/emicklei/go-restful/.travis.yml | 6 - .../github.com/emicklei/go-restful/CHANGES.md | 261 - .../vendor/github.com/emicklei/go-restful/LICENSE | 22 - .../vendor/github.com/emicklei/go-restful/Makefile | 7 - .../github.com/emicklei/go-restful/README.md | 88 - .../vendor/github.com/emicklei/go-restful/Srcfile | 1 - .../github.com/emicklei/go-restful/bench_test.sh | 10 - .../github.com/emicklei/go-restful/compress.go | 123 - .../emicklei/go-restful/compressor_cache.go | 103 - .../emicklei/go-restful/compressor_pools.go | 91 - .../github.com/emicklei/go-restful/compressors.go | 54 - .../github.com/emicklei/go-restful/constants.go | 30 - .../github.com/emicklei/go-restful/container.go | 377 - .../github.com/emicklei/go-restful/cors_filter.go | 202 - .../github.com/emicklei/go-restful/coverage.sh | 2 - .../vendor/github.com/emicklei/go-restful/curly.go | 164 - .../github.com/emicklei/go-restful/curly_route.go | 54 - .../vendor/github.com/emicklei/go-restful/doc.go | 185 - .../emicklei/go-restful/entity_accessors.go | 162 - .../github.com/emicklei/go-restful/filter.go | 35 - .../vendor/github.com/emicklei/go-restful/json.go | 11 - .../github.com/emicklei/go-restful/jsoniter.go | 
12 - .../github.com/emicklei/go-restful/jsr311.go | 297 - .../github.com/emicklei/go-restful/log/log.go | 34 - .../github.com/emicklei/go-restful/logger.go | 32 - .../vendor/github.com/emicklei/go-restful/mime.go | 45 - .../emicklei/go-restful/options_filter.go | 34 - .../github.com/emicklei/go-restful/parameter.go | 143 - .../emicklei/go-restful/path_expression.go | 74 - .../emicklei/go-restful/path_processor.go | 63 - .../github.com/emicklei/go-restful/request.go | 118 - .../github.com/emicklei/go-restful/response.go | 250 - .../vendor/github.com/emicklei/go-restful/route.go | 170 - .../emicklei/go-restful/route_builder.go | 317 - .../github.com/emicklei/go-restful/router.go | 20 - .../emicklei/go-restful/service_error.go | 23 - .../github.com/emicklei/go-restful/web_service.go | 290 - .../emicklei/go-restful/web_service_container.go | 39 - .../github.com/evanphx/json-patch/.travis.yml | 16 - .../vendor/github.com/evanphx/json-patch/LICENSE | 25 - .../vendor/github.com/evanphx/json-patch/README.md | 292 - .../vendor/github.com/evanphx/json-patch/merge.go | 383 - .../vendor/github.com/evanphx/json-patch/patch.go | 682 - .../vendor/github.com/ghodss/yaml/.gitignore | 20 - .../vendor/github.com/ghodss/yaml/.travis.yml | 7 - .../vendor/github.com/ghodss/yaml/LICENSE | 50 - .../vendor/github.com/ghodss/yaml/README.md | 121 - .../vendor/github.com/ghodss/yaml/fields.go | 501 - .../vendor/github.com/ghodss/yaml/yaml.go | 277 - .../vendor/github.com/go-logr/logr/LICENSE | 201 - .../vendor/github.com/go-logr/logr/README.md | 36 - .../vendor/github.com/go-logr/logr/logr.go | 151 - .../vendor/github.com/go-logr/zapr/.gitignore | 3 - .../vendor/github.com/go-logr/zapr/Gopkg.lock | 52 - .../vendor/github.com/go-logr/zapr/Gopkg.toml | 38 - .../vendor/github.com/go-logr/zapr/LICENSE | 201 - .../vendor/github.com/go-logr/zapr/README.md | 45 - .../vendor/github.com/go-logr/zapr/zapr.go | 163 - .../go-openapi/jsonpointer/.editorconfig | 26 - .../github.com/go-openapi/jsonpointer/.gitignore | 1 - .../github.com/go-openapi/jsonpointer/.travis.yml | 15 - .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 - .../github.com/go-openapi/jsonpointer/LICENSE | 202 - .../github.com/go-openapi/jsonpointer/README.md | 15 - .../github.com/go-openapi/jsonpointer/go.mod | 10 - .../github.com/go-openapi/jsonpointer/go.sum | 11 - .../github.com/go-openapi/jsonpointer/pointer.go | 390 - .../github.com/go-openapi/jsonreference/.gitignore | 1 - .../go-openapi/jsonreference/.travis.yml | 16 - .../go-openapi/jsonreference/CODE_OF_CONDUCT.md | 74 - .../github.com/go-openapi/jsonreference/LICENSE | 202 - .../github.com/go-openapi/jsonreference/README.md | 15 - .../github.com/go-openapi/jsonreference/go.mod | 15 - .../github.com/go-openapi/jsonreference/go.sum | 20 - .../go-openapi/jsonreference/reference.go | 156 - .../github.com/go-openapi/spec/.editorconfig | 26 - .../vendor/github.com/go-openapi/spec/.gitignore | 2 - .../github.com/go-openapi/spec/.golangci.yml | 23 - .../vendor/github.com/go-openapi/spec/.travis.yml | 18 - .../github.com/go-openapi/spec/CODE_OF_CONDUCT.md | 74 - .../vendor/github.com/go-openapi/spec/LICENSE | 202 - .../vendor/github.com/go-openapi/spec/README.md | 10 - .../vendor/github.com/go-openapi/spec/bindata.go | 297 - .../vendor/github.com/go-openapi/spec/cache.go | 60 - .../github.com/go-openapi/spec/contact_info.go | 24 - .../vendor/github.com/go-openapi/spec/debug.go | 47 - .../vendor/github.com/go-openapi/spec/expander.go | 650 - .../github.com/go-openapi/spec/external_docs.go | 24 - 
.../vendor/github.com/go-openapi/spec/go.mod | 16 - .../vendor/github.com/go-openapi/spec/go.sum | 22 - .../vendor/github.com/go-openapi/spec/header.go | 197 - .../vendor/github.com/go-openapi/spec/info.go | 165 - .../vendor/github.com/go-openapi/spec/items.go | 237 - .../vendor/github.com/go-openapi/spec/license.go | 23 - .../github.com/go-openapi/spec/normalizer.go | 152 - .../vendor/github.com/go-openapi/spec/operation.go | 398 - .../vendor/github.com/go-openapi/spec/parameter.go | 321 - .../vendor/github.com/go-openapi/spec/path_item.go | 87 - .../vendor/github.com/go-openapi/spec/paths.go | 97 - .../vendor/github.com/go-openapi/spec/ref.go | 191 - .../vendor/github.com/go-openapi/spec/response.go | 131 - .../vendor/github.com/go-openapi/spec/responses.go | 127 - .../vendor/github.com/go-openapi/spec/schema.go | 589 - .../github.com/go-openapi/spec/schema_loader.go | 275 - .../github.com/go-openapi/spec/security_scheme.go | 140 - .../vendor/github.com/go-openapi/spec/spec.go | 86 - .../vendor/github.com/go-openapi/spec/swagger.go | 324 - .../vendor/github.com/go-openapi/spec/tag.go | 75 - .../vendor/github.com/go-openapi/spec/unused.go | 174 - .../github.com/go-openapi/spec/xml_object.go | 68 - .../github.com/go-openapi/swag/.editorconfig | 26 - .../vendor/github.com/go-openapi/swag/.gitignore | 3 - .../github.com/go-openapi/swag/.golangci.yml | 22 - .../vendor/github.com/go-openapi/swag/.travis.yml | 16 - .../github.com/go-openapi/swag/CODE_OF_CONDUCT.md | 74 - .../vendor/github.com/go-openapi/swag/LICENSE | 202 - .../vendor/github.com/go-openapi/swag/README.md | 23 - .../vendor/github.com/go-openapi/swag/convert.go | 207 - .../github.com/go-openapi/swag/convert_types.go | 595 - .../vendor/github.com/go-openapi/swag/doc.go | 33 - .../vendor/github.com/go-openapi/swag/go.mod | 9 - .../vendor/github.com/go-openapi/swag/go.sum | 9 - .../vendor/github.com/go-openapi/swag/json.go | 312 - .../vendor/github.com/go-openapi/swag/loading.go | 80 - .../vendor/github.com/go-openapi/swag/net.go | 38 - .../vendor/github.com/go-openapi/swag/path.go | 59 - .../vendor/github.com/go-openapi/swag/post_go18.go | 23 - .../vendor/github.com/go-openapi/swag/post_go19.go | 67 - .../vendor/github.com/go-openapi/swag/pre_go18.go | 23 - .../vendor/github.com/go-openapi/swag/pre_go19.go | 69 - .../vendor/github.com/go-openapi/swag/util.go | 412 - .../vendor/github.com/go-openapi/swag/yaml.go | 227 - .../vendor/github.com/gobuffalo/envy/.env | 5 - .../vendor/github.com/gobuffalo/envy/.gitignore | 29 - .../github.com/gobuffalo/envy/.gometalinter.json | 3 - .../vendor/github.com/gobuffalo/envy/.travis.yml | 36 - .../vendor/github.com/gobuffalo/envy/LICENSE.txt | 8 - .../vendor/github.com/gobuffalo/envy/Makefile | 46 - .../vendor/github.com/gobuffalo/envy/README.md | 93 - .../vendor/github.com/gobuffalo/envy/SHOULDERS.md | 16 - .../vendor/github.com/gobuffalo/envy/envy.go | 268 - .../vendor/github.com/gobuffalo/envy/go.mod | 8 - .../vendor/github.com/gobuffalo/envy/go.sum | 17 - .../vendor/github.com/gobuffalo/envy/version.go | 3 - .../vendor/github.com/gogo/protobuf/AUTHORS | 15 - .../vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 - .../vendor/github.com/gogo/protobuf/LICENSE | 35 - .../vendor/github.com/gogo/protobuf/proto/Makefile | 43 - .../vendor/github.com/gogo/protobuf/proto/clone.go | 258 - .../github.com/gogo/protobuf/proto/custom_gogo.go | 39 - .../github.com/gogo/protobuf/proto/decode.go | 427 - .../github.com/gogo/protobuf/proto/deprecated.go | 63 - .../github.com/gogo/protobuf/proto/discard.go | 350 
- .../github.com/gogo/protobuf/proto/duration.go | 100 - .../gogo/protobuf/proto/duration_gogo.go | 49 - .../github.com/gogo/protobuf/proto/encode.go | 203 - .../github.com/gogo/protobuf/proto/encode_gogo.go | 33 - .../vendor/github.com/gogo/protobuf/proto/equal.go | 300 - .../github.com/gogo/protobuf/proto/extensions.go | 604 - .../gogo/protobuf/proto/extensions_gogo.go | 368 - .../vendor/github.com/gogo/protobuf/proto/lib.go | 967 - .../github.com/gogo/protobuf/proto/lib_gogo.go | 50 - .../github.com/gogo/protobuf/proto/message_set.go | 181 - .../gogo/protobuf/proto/pointer_reflect.go | 357 - .../gogo/protobuf/proto/pointer_reflect_gogo.go | 59 - .../gogo/protobuf/proto/pointer_unsafe.go | 308 - .../gogo/protobuf/proto/pointer_unsafe_gogo.go | 56 - .../github.com/gogo/protobuf/proto/properties.go | 599 - .../gogo/protobuf/proto/properties_gogo.go | 36 - .../github.com/gogo/protobuf/proto/skip_gogo.go | 119 - .../gogo/protobuf/proto/table_marshal.go | 3006 -- .../gogo/protobuf/proto/table_marshal_gogo.go | 388 - .../github.com/gogo/protobuf/proto/table_merge.go | 657 - .../gogo/protobuf/proto/table_unmarshal.go | 2245 - .../gogo/protobuf/proto/table_unmarshal_gogo.go | 385 - .../vendor/github.com/gogo/protobuf/proto/text.go | 928 - .../github.com/gogo/protobuf/proto/text_gogo.go | 57 - .../github.com/gogo/protobuf/proto/text_parser.go | 1018 - .../github.com/gogo/protobuf/proto/timestamp.go | 113 - .../gogo/protobuf/proto/timestamp_gogo.go | 49 - .../github.com/gogo/protobuf/proto/wrappers.go | 1888 - .../gogo/protobuf/proto/wrappers_gogo.go | 113 - .../github.com/gogo/protobuf/sortkeys/sortkeys.go | 101 - .../vendor/github.com/golang/groupcache/LICENSE | 191 - .../vendor/github.com/golang/groupcache/lru/lru.go | 133 - .../vendor/github.com/golang/protobuf/AUTHORS | 3 - .../vendor/github.com/golang/protobuf/CONTRIBUTORS | 3 - .../vendor/github.com/golang/protobuf/LICENSE | 28 - .../github.com/golang/protobuf/jsonpb/jsonpb.go | 1271 - .../github.com/golang/protobuf/proto/clone.go | 253 - .../github.com/golang/protobuf/proto/decode.go | 427 - .../github.com/golang/protobuf/proto/deprecated.go | 63 - .../github.com/golang/protobuf/proto/discard.go | 350 - .../github.com/golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 - .../github.com/golang/protobuf/proto/extensions.go | 607 - .../vendor/github.com/golang/protobuf/proto/lib.go | 965 - .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 - .../golang/protobuf/proto/pointer_unsafe.go | 313 - .../github.com/golang/protobuf/proto/properties.go | 545 - .../golang/protobuf/proto/table_marshal.go | 2776 - .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2053 - .../github.com/golang/protobuf/proto/text.go | 843 - .../golang/protobuf/proto/text_parser.go | 880 - .../protoc-gen-go/descriptor/descriptor.pb.go | 2887 - .../protoc-gen-go/descriptor/descriptor.proto | 883 - .../protobuf/protoc-gen-go/generator/generator.go | 2806 - .../generator/internal/remap/remap.go | 117 - .../protobuf/protoc-gen-go/plugin/plugin.pb.go | 369 - .../protobuf/protoc-gen-go/plugin/plugin.pb.golden | 83 - .../protobuf/protoc-gen-go/plugin/plugin.proto | 167 - .../github.com/golang/protobuf/ptypes/any.go | 141 - .../golang/protobuf/ptypes/any/any.pb.go | 200 - .../golang/protobuf/ptypes/any/any.proto | 154 - .../github.com/golang/protobuf/ptypes/doc.go | 35 - .../github.com/golang/protobuf/ptypes/duration.go | 102 - 
.../golang/protobuf/ptypes/duration/duration.pb.go | 161 - .../golang/protobuf/ptypes/duration/duration.proto | 117 - .../golang/protobuf/ptypes/struct/struct.pb.go | 336 - .../golang/protobuf/ptypes/struct/struct.proto | 96 - .../github.com/golang/protobuf/ptypes/timestamp.go | 132 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 179 - .../protobuf/ptypes/timestamp/timestamp.proto | 135 - .../golang/protobuf/ptypes/wrappers/wrappers.pb.go | 461 - .../golang/protobuf/ptypes/wrappers/wrappers.proto | 118 - .../vendor/github.com/google/btree/.travis.yml | 1 - .../vendor/github.com/google/btree/LICENSE | 202 - .../vendor/github.com/google/btree/README.md | 12 - .../vendor/github.com/google/btree/btree.go | 890 - .../vendor/github.com/google/btree/btree_mem.go | 76 - .../vendor/github.com/google/gofuzz/.travis.yml | 13 - .../github.com/google/gofuzz/CONTRIBUTING.md | 67 - .../vendor/github.com/google/gofuzz/LICENSE | 202 - .../vendor/github.com/google/gofuzz/README.md | 71 - .../vendor/github.com/google/gofuzz/doc.go | 18 - .../vendor/github.com/google/gofuzz/fuzz.go | 487 - .../vendor/github.com/google/gofuzz/go.mod | 3 - .../vendor/github.com/google/uuid/.travis.yml | 9 - .../vendor/github.com/google/uuid/CONTRIBUTING.md | 10 - .../vendor/github.com/google/uuid/CONTRIBUTORS | 9 - .../vendor/github.com/google/uuid/LICENSE | 27 - .../vendor/github.com/google/uuid/README.md | 19 - .../vendor/github.com/google/uuid/dce.go | 80 - .../vendor/github.com/google/uuid/doc.go | 12 - .../vendor/github.com/google/uuid/go.mod | 1 - .../vendor/github.com/google/uuid/hash.go | 53 - .../vendor/github.com/google/uuid/marshal.go | 37 - .../vendor/github.com/google/uuid/node.go | 90 - .../vendor/github.com/google/uuid/node_js.go | 12 - .../vendor/github.com/google/uuid/node_net.go | 33 - .../vendor/github.com/google/uuid/sql.go | 59 - .../vendor/github.com/google/uuid/time.go | 123 - .../vendor/github.com/google/uuid/util.go | 43 - .../vendor/github.com/google/uuid/uuid.go | 245 - .../vendor/github.com/google/uuid/version1.go | 44 - .../vendor/github.com/google/uuid/version4.go | 38 - .../vendor/github.com/googleapis/gnostic/LICENSE | 203 - .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 8728 --- .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go | 4455 -- .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto | 663 - .../googleapis/gnostic/OpenAPIv2/README.md | 16 - .../googleapis/gnostic/OpenAPIv2/openapi-2.0.json | 1610 - .../googleapis/gnostic/compiler/README.md | 3 - .../googleapis/gnostic/compiler/context.go | 43 - .../googleapis/gnostic/compiler/error.go | 61 - .../gnostic/compiler/extension-handler.go | 101 - .../googleapis/gnostic/compiler/helpers.go | 197 - .../github.com/googleapis/gnostic/compiler/main.go | 16 - .../googleapis/gnostic/compiler/reader.go | 175 - .../gnostic/extensions/COMPILE-EXTENSION.sh | 5 - .../googleapis/gnostic/extensions/README.md | 5 - .../googleapis/gnostic/extensions/extension.pb.go | 218 - .../googleapis/gnostic/extensions/extension.proto | 93 - .../googleapis/gnostic/extensions/extensions.go | 82 - .../github.com/gophercloud/gophercloud/.gitignore | 3 - .../github.com/gophercloud/gophercloud/.travis.yml | 25 - .../github.com/gophercloud/gophercloud/.zuul.yaml | 102 - .../gophercloud/gophercloud/CHANGELOG.md | 0 .../github.com/gophercloud/gophercloud/LICENSE | 191 - .../github.com/gophercloud/gophercloud/README.md | 159 - .../gophercloud/gophercloud/auth_options.go | 437 - .../gophercloud/gophercloud/auth_result.go | 52 - .../github.com/gophercloud/gophercloud/doc.go | 110 - 
.../gophercloud/gophercloud/endpoint_search.go | 76 - .../github.com/gophercloud/gophercloud/errors.go | 471 - .../github.com/gophercloud/gophercloud/go.mod | 7 - .../github.com/gophercloud/gophercloud/go.sum | 8 - .../gophercloud/gophercloud/openstack/auth_env.go | 125 - .../gophercloud/gophercloud/openstack/client.go | 438 - .../gophercloud/gophercloud/openstack/doc.go | 14 - .../gophercloud/openstack/endpoint_location.go | 107 - .../gophercloud/gophercloud/openstack/errors.go | 71 - .../openstack/identity/v2/tenants/doc.go | 65 - .../openstack/identity/v2/tenants/requests.go | 116 - .../openstack/identity/v2/tenants/results.go | 91 - .../openstack/identity/v2/tenants/urls.go | 23 - .../openstack/identity/v2/tokens/doc.go | 46 - .../openstack/identity/v2/tokens/requests.go | 103 - .../openstack/identity/v2/tokens/results.go | 174 - .../openstack/identity/v2/tokens/urls.go | 13 - .../openstack/identity/v3/tokens/doc.go | 108 - .../openstack/identity/v3/tokens/requests.go | 162 - .../openstack/identity/v3/tokens/results.go | 178 - .../openstack/identity/v3/tokens/urls.go | 7 - .../gophercloud/openstack/utils/base_endpoint.go | 28 - .../gophercloud/openstack/utils/choose_version.go | 111 - .../gophercloud/gophercloud/pagination/http.go | 60 - .../gophercloud/gophercloud/pagination/linked.go | 92 - .../gophercloud/gophercloud/pagination/marker.go | 58 - .../gophercloud/gophercloud/pagination/pager.go | 251 - .../gophercloud/gophercloud/pagination/pkg.go | 4 - .../gophercloud/gophercloud/pagination/single.go | 33 - .../github.com/gophercloud/gophercloud/params.go | 491 - .../gophercloud/gophercloud/provider_client.go | 501 - .../github.com/gophercloud/gophercloud/results.go | 448 - .../gophercloud/gophercloud/service_client.go | 154 - .../github.com/gophercloud/gophercloud/util.go | 102 - .../github.com/gregjones/httpcache/.travis.yml | 19 - .../github.com/gregjones/httpcache/LICENSE.txt | 7 - .../github.com/gregjones/httpcache/README.md | 25 - .../gregjones/httpcache/diskcache/diskcache.go | 61 - .../github.com/gregjones/httpcache/httpcache.go | 551 - .../grpc-ecosystem/grpc-gateway/LICENSE.txt | 27 - .../grpc-gateway/internal/BUILD.bazel | 22 - .../grpc-gateway/internal/stream_chunk.pb.go | 118 - .../grpc-gateway/internal/stream_chunk.proto | 15 - .../grpc-gateway/runtime/BUILD.bazel | 80 - .../grpc-ecosystem/grpc-gateway/runtime/context.go | 210 - .../grpc-ecosystem/grpc-gateway/runtime/convert.go | 312 - .../grpc-ecosystem/grpc-gateway/runtime/doc.go | 5 - .../grpc-ecosystem/grpc-gateway/runtime/errors.go | 145 - .../grpc-gateway/runtime/fieldmask.go | 70 - .../grpc-ecosystem/grpc-gateway/runtime/handler.go | 215 - .../grpc-gateway/runtime/marshal_httpbodyproto.go | 43 - .../grpc-gateway/runtime/marshal_json.go | 45 - .../grpc-gateway/runtime/marshal_jsonpb.go | 242 - .../grpc-gateway/runtime/marshal_proto.go | 62 - .../grpc-gateway/runtime/marshaler.go | 48 - .../grpc-gateway/runtime/marshaler_registry.go | 91 - .../grpc-ecosystem/grpc-gateway/runtime/mux.go | 268 - .../grpc-ecosystem/grpc-gateway/runtime/pattern.go | 227 - .../grpc-gateway/runtime/proto2_convert.go | 80 - .../grpc-gateway/runtime/proto_errors.go | 70 - .../grpc-ecosystem/grpc-gateway/runtime/query.go | 392 - .../grpc-gateway/utilities/BUILD.bazel | 21 - .../grpc-ecosystem/grpc-gateway/utilities/doc.go | 2 - .../grpc-gateway/utilities/pattern.go | 22 - .../grpc-gateway/utilities/readerfactory.go | 20 - .../grpc-ecosystem/grpc-gateway/utilities/trie.go | 177 - .../github.com/hashicorp/golang-lru/.gitignore | 23 - 
.../vendor/github.com/hashicorp/golang-lru/2q.go | 223 - .../vendor/github.com/hashicorp/golang-lru/LICENSE | 362 - .../github.com/hashicorp/golang-lru/README.md | 25 - .../vendor/github.com/hashicorp/golang-lru/arc.go | 257 - .../vendor/github.com/hashicorp/golang-lru/doc.go | 21 - .../vendor/github.com/hashicorp/golang-lru/go.mod | 1 - .../vendor/github.com/hashicorp/golang-lru/lru.go | 116 - .../hashicorp/golang-lru/simplelru/lru.go | 161 - .../golang-lru/simplelru/lru_interface.go | 36 - .../vendor/github.com/imdario/mergo/.gitignore | 33 - .../vendor/github.com/imdario/mergo/.travis.yml | 7 - .../github.com/imdario/mergo/CODE_OF_CONDUCT.md | 46 - .../vendor/github.com/imdario/mergo/LICENSE | 28 - .../vendor/github.com/imdario/mergo/README.md | 238 - .../vendor/github.com/imdario/mergo/doc.go | 44 - .../vendor/github.com/imdario/mergo/map.go | 175 - .../vendor/github.com/imdario/mergo/merge.go | 255 - .../vendor/github.com/imdario/mergo/mergo.go | 97 - .../vendor/github.com/joho/godotenv/.gitignore | 1 - .../vendor/github.com/joho/godotenv/.travis.yml | 8 - .../vendor/github.com/joho/godotenv/LICENCE | 23 - .../vendor/github.com/joho/godotenv/README.md | 163 - .../vendor/github.com/joho/godotenv/godotenv.go | 346 - .../github.com/json-iterator/go/.codecov.yml | 3 - .../vendor/github.com/json-iterator/go/.gitignore | 4 - .../vendor/github.com/json-iterator/go/.travis.yml | 14 - .../vendor/github.com/json-iterator/go/Gopkg.lock | 21 - .../vendor/github.com/json-iterator/go/Gopkg.toml | 26 - .../vendor/github.com/json-iterator/go/LICENSE | 21 - .../vendor/github.com/json-iterator/go/README.md | 87 - .../vendor/github.com/json-iterator/go/adapter.go | 150 - .../vendor/github.com/json-iterator/go/any.go | 325 - .../github.com/json-iterator/go/any_array.go | 278 - .../vendor/github.com/json-iterator/go/any_bool.go | 137 - .../github.com/json-iterator/go/any_float.go | 83 - .../github.com/json-iterator/go/any_int32.go | 74 - .../github.com/json-iterator/go/any_int64.go | 74 - .../github.com/json-iterator/go/any_invalid.go | 82 - .../vendor/github.com/json-iterator/go/any_nil.go | 69 - .../github.com/json-iterator/go/any_number.go | 123 - .../github.com/json-iterator/go/any_object.go | 374 - .../vendor/github.com/json-iterator/go/any_str.go | 166 - .../github.com/json-iterator/go/any_uint32.go | 74 - .../github.com/json-iterator/go/any_uint64.go | 74 - .../vendor/github.com/json-iterator/go/build.sh | 12 - .../vendor/github.com/json-iterator/go/config.go | 375 - .../json-iterator/go/fuzzy_mode_convert_table.md | 7 - .../vendor/github.com/json-iterator/go/iter.go | 322 - .../github.com/json-iterator/go/iter_array.go | 58 - .../github.com/json-iterator/go/iter_float.go | 339 - .../vendor/github.com/json-iterator/go/iter_int.go | 345 - .../github.com/json-iterator/go/iter_object.go | 251 - .../github.com/json-iterator/go/iter_skip.go | 129 - .../json-iterator/go/iter_skip_sloppy.go | 144 - .../json-iterator/go/iter_skip_strict.go | 99 - .../vendor/github.com/json-iterator/go/iter_str.go | 215 - .../vendor/github.com/json-iterator/go/jsoniter.go | 18 - .../vendor/github.com/json-iterator/go/pool.go | 42 - .../vendor/github.com/json-iterator/go/reflect.go | 332 - .../github.com/json-iterator/go/reflect_array.go | 104 - .../github.com/json-iterator/go/reflect_dynamic.go | 70 - .../json-iterator/go/reflect_extension.go | 483 - .../json-iterator/go/reflect_json_number.go | 112 - .../json-iterator/go/reflect_json_raw_message.go | 60 - .../github.com/json-iterator/go/reflect_map.go | 338 - 
.../json-iterator/go/reflect_marshaler.go | 217 - .../github.com/json-iterator/go/reflect_native.go | 451 - .../json-iterator/go/reflect_optional.go | 133 - .../github.com/json-iterator/go/reflect_slice.go | 99 - .../json-iterator/go/reflect_struct_decoder.go | 1048 - .../json-iterator/go/reflect_struct_encoder.go | 210 - .../vendor/github.com/json-iterator/go/stream.go | 211 - .../github.com/json-iterator/go/stream_float.go | 94 - .../github.com/json-iterator/go/stream_int.go | 190 - .../github.com/json-iterator/go/stream_str.go | 372 - .../vendor/github.com/json-iterator/go/test.sh | 12 - .../vendor/github.com/mailru/easyjson/LICENSE | 7 - .../github.com/mailru/easyjson/buffer/pool.go | 270 - .../mailru/easyjson/jlexer/bytestostr.go | 24 - .../mailru/easyjson/jlexer/bytestostr_nounsafe.go | 13 - .../github.com/mailru/easyjson/jlexer/error.go | 15 - .../github.com/mailru/easyjson/jlexer/lexer.go | 1182 - .../github.com/mailru/easyjson/jwriter/writer.go | 390 - .../vendor/github.com/markbates/inflect/.gitignore | 29 - .../markbates/inflect/.gometalinter.json | 3 - .../vendor/github.com/markbates/inflect/.hgignore | 1 - .../github.com/markbates/inflect/.travis.yml | 26 - .../vendor/github.com/markbates/inflect/LICENCE | 7 - .../vendor/github.com/markbates/inflect/Makefile | 55 - .../vendor/github.com/markbates/inflect/README.md | 214 - .../vendor/github.com/markbates/inflect/go.mod | 6 - .../vendor/github.com/markbates/inflect/go.sum | 10 - .../vendor/github.com/markbates/inflect/helpers.go | 19 - .../vendor/github.com/markbates/inflect/inflect.go | 892 - .../github.com/markbates/inflect/inflections.json | 4 - .../vendor/github.com/markbates/inflect/name.go | 163 - .../github.com/markbates/inflect/shoulders.md | 12 - .../vendor/github.com/markbates/inflect/version.go | 3 - .../matttproud/golang_protobuf_extensions/LICENSE | 201 - .../matttproud/golang_protobuf_extensions/NOTICE | 1 - .../golang_protobuf_extensions/pbutil/.gitignore | 1 - .../golang_protobuf_extensions/pbutil/Makefile | 7 - .../golang_protobuf_extensions/pbutil/decode.go | 75 - .../golang_protobuf_extensions/pbutil/doc.go | 16 - .../golang_protobuf_extensions/pbutil/encode.go | 46 - .../metal3-io/baremetal-operator/LICENSE | 201 - .../pkg/apis/addtoscheme_metal3_v1alpha1.go | 10 - .../metal3-io/baremetal-operator/pkg/apis/apis.go | 13 - .../apis/metal3/v1alpha1/baremetalhost_types.go | 654 - .../pkg/apis/metal3/v1alpha1/doc.go | 4 - .../pkg/apis/metal3/v1alpha1/register.go | 19 - .../apis/metal3/v1alpha1/zz_generated.deepcopy.go | 362 - .../apis/metal3/v1alpha1/zz_generated.defaults.go | 32 - .../apis/metal3/v1alpha1/zz_generated.openapi.go | 59 - .../github.com/modern-go/concurrent/.gitignore | 1 - .../github.com/modern-go/concurrent/.travis.yml | 14 - .../vendor/github.com/modern-go/concurrent/LICENSE | 201 - .../github.com/modern-go/concurrent/README.md | 49 - .../github.com/modern-go/concurrent/executor.go | 14 - .../github.com/modern-go/concurrent/go_above_19.go | 15 - .../github.com/modern-go/concurrent/go_below_19.go | 33 - .../vendor/github.com/modern-go/concurrent/log.go | 13 - .../vendor/github.com/modern-go/concurrent/test.sh | 12 - .../modern-go/concurrent/unbounded_executor.go | 119 - .../github.com/modern-go/reflect2/.gitignore | 2 - .../github.com/modern-go/reflect2/.travis.yml | 15 - .../github.com/modern-go/reflect2/Gopkg.lock | 15 - .../github.com/modern-go/reflect2/Gopkg.toml | 35 - .../vendor/github.com/modern-go/reflect2/LICENSE | 201 - .../vendor/github.com/modern-go/reflect2/README.md | 71 - 
.../github.com/modern-go/reflect2/go_above_17.go | 8 - .../github.com/modern-go/reflect2/go_above_19.go | 14 - .../github.com/modern-go/reflect2/go_below_17.go | 9 - .../github.com/modern-go/reflect2/go_below_19.go | 14 - .../github.com/modern-go/reflect2/reflect2.go | 298 - .../github.com/modern-go/reflect2/reflect2_amd64.s | 0 .../github.com/modern-go/reflect2/reflect2_kind.go | 30 - .../github.com/modern-go/reflect2/relfect2_386.s | 0 .../modern-go/reflect2/relfect2_amd64p32.s | 0 .../github.com/modern-go/reflect2/relfect2_arm.s | 0 .../github.com/modern-go/reflect2/relfect2_arm64.s | 0 .../modern-go/reflect2/relfect2_mips64x.s | 0 .../github.com/modern-go/reflect2/relfect2_mipsx.s | 0 .../modern-go/reflect2/relfect2_ppc64x.s | 0 .../github.com/modern-go/reflect2/relfect2_s390x.s | 0 .../github.com/modern-go/reflect2/safe_field.go | 58 - .../github.com/modern-go/reflect2/safe_map.go | 101 - .../github.com/modern-go/reflect2/safe_slice.go | 92 - .../github.com/modern-go/reflect2/safe_struct.go | 29 - .../github.com/modern-go/reflect2/safe_type.go | 78 - .../vendor/github.com/modern-go/reflect2/test.sh | 12 - .../github.com/modern-go/reflect2/type_map.go | 113 - .../github.com/modern-go/reflect2/unsafe_array.go | 65 - .../github.com/modern-go/reflect2/unsafe_eface.go | 59 - .../github.com/modern-go/reflect2/unsafe_field.go | 74 - .../github.com/modern-go/reflect2/unsafe_iface.go | 64 - .../github.com/modern-go/reflect2/unsafe_link.go | 70 - .../github.com/modern-go/reflect2/unsafe_map.go | 138 - .../github.com/modern-go/reflect2/unsafe_ptr.go | 46 - .../github.com/modern-go/reflect2/unsafe_slice.go | 177 - .../github.com/modern-go/reflect2/unsafe_struct.go | 59 - .../github.com/modern-go/reflect2/unsafe_type.go | 85 - .../operator-framework/operator-sdk/LICENSE | 201 - .../operator-sdk/pkg/k8sutil/constants.go | 34 - .../operator-sdk/pkg/k8sutil/k8sutil.go | 158 - .../operator-sdk/pkg/kube-metrics/collector.go | 80 - .../operator-sdk/pkg/kube-metrics/metrics.go | 93 - .../operator-sdk/pkg/kube-metrics/server.go | 71 - .../operator-sdk/pkg/kube-metrics/uclient.go | 102 - .../operator-sdk/pkg/leader/doc.go | 54 - .../operator-sdk/pkg/leader/leader.go | 145 - .../operator-sdk/pkg/log/zap/flags.go | 160 - .../operator-sdk/pkg/log/zap/logger.go | 89 - .../operator-sdk/pkg/metrics/metrics.go | 173 - .../operator-sdk/pkg/metrics/service-monitor.go | 94 - .../pkg/restmapper/dynamicrestmapper.go | 124 - .../operator-sdk/version/version.go | 21 - .../vendor/github.com/pborman/uuid/.travis.yml | 10 - .../vendor/github.com/pborman/uuid/CONTRIBUTING.md | 10 - .../vendor/github.com/pborman/uuid/CONTRIBUTORS | 1 - .../vendor/github.com/pborman/uuid/LICENSE | 27 - .../vendor/github.com/pborman/uuid/README.md | 15 - .../vendor/github.com/pborman/uuid/dce.go | 84 - .../vendor/github.com/pborman/uuid/doc.go | 13 - .../vendor/github.com/pborman/uuid/go.mod | 3 - .../vendor/github.com/pborman/uuid/go.sum | 2 - .../vendor/github.com/pborman/uuid/hash.go | 53 - .../vendor/github.com/pborman/uuid/marshal.go | 85 - .../vendor/github.com/pborman/uuid/node.go | 50 - .../vendor/github.com/pborman/uuid/sql.go | 68 - .../vendor/github.com/pborman/uuid/time.go | 57 - .../vendor/github.com/pborman/uuid/util.go | 32 - .../vendor/github.com/pborman/uuid/uuid.go | 162 - .../vendor/github.com/pborman/uuid/version1.go | 23 - .../vendor/github.com/pborman/uuid/version4.go | 26 - .../vendor/github.com/peterbourgon/diskv/LICENSE | 19 - .../vendor/github.com/peterbourgon/diskv/README.md | 141 - 
.../github.com/peterbourgon/diskv/compression.go | 64 - .../vendor/github.com/peterbourgon/diskv/diskv.go | 624 - .../vendor/github.com/peterbourgon/diskv/index.go | 115 - .../vendor/github.com/pkg/errors/.gitignore | 24 - .../vendor/github.com/pkg/errors/.travis.yml | 15 - .../vendor/github.com/pkg/errors/LICENSE | 23 - .../vendor/github.com/pkg/errors/README.md | 52 - .../vendor/github.com/pkg/errors/appveyor.yml | 32 - .../vendor/github.com/pkg/errors/errors.go | 282 - .../vendor/github.com/pkg/errors/stack.go | 147 - .../github.com/prometheus/client_golang/LICENSE | 201 - .../github.com/prometheus/client_golang/NOTICE | 23 - .../prometheus/client_golang/prometheus/.gitignore | 1 - .../prometheus/client_golang/prometheus/README.md | 1 - .../client_golang/prometheus/collector.go | 120 - .../prometheus/client_golang/prometheus/counter.go | 277 - .../prometheus/client_golang/prometheus/desc.go | 184 - .../prometheus/client_golang/prometheus/doc.go | 201 - .../client_golang/prometheus/expvar_collector.go | 119 - .../prometheus/client_golang/prometheus/fnv.go | 42 - .../prometheus/client_golang/prometheus/gauge.go | 286 - .../client_golang/prometheus/go_collector.go | 301 - .../client_golang/prometheus/histogram.go | 614 - .../prometheus/client_golang/prometheus/http.go | 503 - .../client_golang/prometheus/internal/metric.go | 85 - .../prometheus/client_golang/prometheus/labels.go | 87 - .../prometheus/client_golang/prometheus/metric.go | 174 - .../client_golang/prometheus/observer.go | 52 - .../client_golang/prometheus/process_collector.go | 204 - .../client_golang/prometheus/promhttp/delegator.go | 198 - .../prometheus/promhttp/delegator_1_8.go | 181 - .../prometheus/promhttp/delegator_pre_1_8.go | 44 - .../client_golang/prometheus/promhttp/http.go | 310 - .../prometheus/promhttp/instrument_client.go | 97 - .../prometheus/promhttp/instrument_client_1_8.go | 144 - .../prometheus/promhttp/instrument_server.go | 447 - .../client_golang/prometheus/registry.go | 937 - .../prometheus/client_golang/prometheus/summary.go | 775 - .../prometheus/client_golang/prometheus/timer.go | 54 - .../prometheus/client_golang/prometheus/untyped.go | 42 - .../prometheus/client_golang/prometheus/value.go | 162 - .../prometheus/client_golang/prometheus/vec.go | 472 - .../prometheus/client_golang/prometheus/wrap.go | 179 - .../github.com/prometheus/client_model/LICENSE | 201 - .../github.com/prometheus/client_model/NOTICE | 5 - .../prometheus/client_model/go/metrics.pb.go | 629 - .../vendor/github.com/prometheus/common/LICENSE | 201 - .../vendor/github.com/prometheus/common/NOTICE | 5 - .../github.com/prometheus/common/expfmt/decode.go | 429 - .../github.com/prometheus/common/expfmt/encode.go | 88 - .../github.com/prometheus/common/expfmt/expfmt.go | 38 - .../github.com/prometheus/common/expfmt/fuzz.go | 36 - .../prometheus/common/expfmt/text_create.go | 468 - .../prometheus/common/expfmt/text_parse.go | 757 - .../internal/bitbucket.org/ww/goautoneg/README.txt | 67 - .../internal/bitbucket.org/ww/goautoneg/autoneg.go | 162 - .../github.com/prometheus/common/model/alert.go | 136 - .../prometheus/common/model/fingerprinting.go | 105 - .../github.com/prometheus/common/model/fnv.go | 42 - .../github.com/prometheus/common/model/labels.go | 210 - .../github.com/prometheus/common/model/labelset.go | 169 - .../github.com/prometheus/common/model/metric.go | 102 - .../github.com/prometheus/common/model/model.go | 16 - .../prometheus/common/model/signature.go | 144 - .../github.com/prometheus/common/model/silence.go | 106 - 
.../github.com/prometheus/common/model/time.go | 264 - .../github.com/prometheus/common/model/value.go | 416 - .../vendor/github.com/prometheus/procfs/.gitignore | 1 - .../github.com/prometheus/procfs/CONTRIBUTING.md | 18 - .../vendor/github.com/prometheus/procfs/LICENSE | 201 - .../github.com/prometheus/procfs/MAINTAINERS.md | 2 - .../vendor/github.com/prometheus/procfs/Makefile | 28 - .../github.com/prometheus/procfs/Makefile.common | 223 - .../vendor/github.com/prometheus/procfs/NOTICE | 7 - .../vendor/github.com/prometheus/procfs/README.md | 11 - .../github.com/prometheus/procfs/buddyinfo.go | 95 - .../vendor/github.com/prometheus/procfs/doc.go | 45 - .../github.com/prometheus/procfs/fixtures.ttar | 1714 - .../vendor/github.com/prometheus/procfs/fs.go | 46 - .../vendor/github.com/prometheus/procfs/go.mod | 3 - .../vendor/github.com/prometheus/procfs/go.sum | 2 - .../vendor/github.com/prometheus/procfs/ipvs.go | 259 - .../vendor/github.com/prometheus/procfs/mdstat.go | 151 - .../github.com/prometheus/procfs/mountstats.go | 616 - .../vendor/github.com/prometheus/procfs/net_dev.go | 216 - .../vendor/github.com/prometheus/procfs/proc.go | 258 - .../vendor/github.com/prometheus/procfs/proc_io.go | 65 - .../github.com/prometheus/procfs/proc_limits.go | 150 - .../vendor/github.com/prometheus/procfs/proc_ns.go | 68 - .../github.com/prometheus/procfs/proc_psi.go | 110 - .../github.com/prometheus/procfs/proc_stat.go | 188 - .../vendor/github.com/prometheus/procfs/stat.go | 232 - .../vendor/github.com/prometheus/procfs/ttar | 389 - .../vendor/github.com/prometheus/procfs/xfrm.go | 187 - .../vendor/github.com/rogpeppe/go-internal/LICENSE | 27 - .../rogpeppe/go-internal/modfile/gopkgin.go | 47 - .../rogpeppe/go-internal/modfile/print.go | 164 - .../rogpeppe/go-internal/modfile/read.go | 869 - .../rogpeppe/go-internal/modfile/rule.go | 724 - .../rogpeppe/go-internal/module/module.go | 540 - .../rogpeppe/go-internal/semver/semver.go | 388 - .../vendor/github.com/spf13/afero/.travis.yml | 21 - .../vendor/github.com/spf13/afero/LICENSE.txt | 174 - .../vendor/github.com/spf13/afero/README.md | 452 - .../vendor/github.com/spf13/afero/afero.go | 108 - .../vendor/github.com/spf13/afero/appveyor.yml | 15 - .../vendor/github.com/spf13/afero/basepath.go | 180 - .../vendor/github.com/spf13/afero/cacheOnReadFs.go | 290 - .../vendor/github.com/spf13/afero/const_bsds.go | 22 - .../github.com/spf13/afero/const_win_unix.go | 25 - .../vendor/github.com/spf13/afero/copyOnWriteFs.go | 293 - .../vendor/github.com/spf13/afero/go.mod | 3 - .../vendor/github.com/spf13/afero/go.sum | 2 - .../vendor/github.com/spf13/afero/httpFs.go | 110 - .../vendor/github.com/spf13/afero/ioutil.go | 230 - .../vendor/github.com/spf13/afero/lstater.go | 27 - .../vendor/github.com/spf13/afero/match.go | 110 - .../vendor/github.com/spf13/afero/mem/dir.go | 37 - .../vendor/github.com/spf13/afero/mem/dirmap.go | 43 - .../vendor/github.com/spf13/afero/mem/file.go | 317 - .../vendor/github.com/spf13/afero/memmap.go | 365 - .../vendor/github.com/spf13/afero/os.go | 101 - .../vendor/github.com/spf13/afero/path.go | 106 - .../vendor/github.com/spf13/afero/readonlyfs.go | 80 - .../vendor/github.com/spf13/afero/regexpfs.go | 214 - .../vendor/github.com/spf13/afero/unionFile.go | 320 - .../vendor/github.com/spf13/afero/util.go | 330 - .../vendor/github.com/spf13/pflag/.gitignore | 2 - .../vendor/github.com/spf13/pflag/.travis.yml | 21 - .../vendor/github.com/spf13/pflag/LICENSE | 28 - .../vendor/github.com/spf13/pflag/README.md | 296 - 
.../vendor/github.com/spf13/pflag/bool.go | 94 - .../vendor/github.com/spf13/pflag/bool_slice.go | 147 - .../vendor/github.com/spf13/pflag/bytes.go | 209 - .../vendor/github.com/spf13/pflag/count.go | 96 - .../vendor/github.com/spf13/pflag/duration.go | 86 - .../github.com/spf13/pflag/duration_slice.go | 128 - .../vendor/github.com/spf13/pflag/flag.go | 1227 - .../vendor/github.com/spf13/pflag/float32.go | 88 - .../vendor/github.com/spf13/pflag/float64.go | 84 - .../vendor/github.com/spf13/pflag/golangflag.go | 105 - .../vendor/github.com/spf13/pflag/int.go | 84 - .../vendor/github.com/spf13/pflag/int16.go | 88 - .../vendor/github.com/spf13/pflag/int32.go | 88 - .../vendor/github.com/spf13/pflag/int64.go | 84 - .../vendor/github.com/spf13/pflag/int8.go | 88 - .../vendor/github.com/spf13/pflag/int_slice.go | 128 - .../vendor/github.com/spf13/pflag/ip.go | 94 - .../vendor/github.com/spf13/pflag/ip_slice.go | 148 - .../vendor/github.com/spf13/pflag/ipmask.go | 122 - .../vendor/github.com/spf13/pflag/ipnet.go | 98 - .../vendor/github.com/spf13/pflag/string.go | 80 - .../vendor/github.com/spf13/pflag/string_array.go | 103 - .../vendor/github.com/spf13/pflag/string_slice.go | 149 - .../vendor/github.com/spf13/pflag/string_to_int.go | 149 - .../github.com/spf13/pflag/string_to_string.go | 160 - .../vendor/github.com/spf13/pflag/uint.go | 88 - .../vendor/github.com/spf13/pflag/uint16.go | 88 - .../vendor/github.com/spf13/pflag/uint32.go | 88 - .../vendor/github.com/spf13/pflag/uint64.go | 88 - .../vendor/github.com/spf13/pflag/uint8.go | 88 - .../vendor/github.com/spf13/pflag/uint_slice.go | 126 - .../vendor/go.opencensus.io/.gitignore | 9 - .../vendor/go.opencensus.io/.travis.yml | 17 - cmd/bpa-operator/vendor/go.opencensus.io/AUTHORS | 1 - .../vendor/go.opencensus.io/CONTRIBUTING.md | 63 - .../vendor/go.opencensus.io/Gopkg.lock | 231 - .../vendor/go.opencensus.io/Gopkg.toml | 36 - cmd/bpa-operator/vendor/go.opencensus.io/LICENSE | 202 - cmd/bpa-operator/vendor/go.opencensus.io/Makefile | 95 - cmd/bpa-operator/vendor/go.opencensus.io/README.md | 263 - .../vendor/go.opencensus.io/appveyor.yml | 25 - cmd/bpa-operator/vendor/go.opencensus.io/go.mod | 13 - cmd/bpa-operator/vendor/go.opencensus.io/go.sum | 150 - .../vendor/go.opencensus.io/internal/internal.go | 37 - .../vendor/go.opencensus.io/internal/sanitize.go | 50 - .../internal/tagencoding/tagencoding.go | 75 - .../go.opencensus.io/internal/traceinternals.go | 53 - .../go.opencensus.io/metric/metricdata/doc.go | 19 - .../go.opencensus.io/metric/metricdata/exemplar.go | 33 - .../go.opencensus.io/metric/metricdata/label.go | 28 - .../go.opencensus.io/metric/metricdata/metric.go | 46 - .../go.opencensus.io/metric/metricdata/point.go | 193 - .../metric/metricdata/type_string.go | 16 - .../go.opencensus.io/metric/metricdata/unit.go | 27 - .../metric/metricproducer/manager.go | 78 - .../metric/metricproducer/producer.go | 28 - .../vendor/go.opencensus.io/opencensus.go | 21 - .../go.opencensus.io/plugin/ocgrpc/client.go | 56 - .../plugin/ocgrpc/client_metrics.go | 107 - .../plugin/ocgrpc/client_stats_handler.go | 49 - .../vendor/go.opencensus.io/plugin/ocgrpc/doc.go | 19 - .../go.opencensus.io/plugin/ocgrpc/server.go | 80 - .../plugin/ocgrpc/server_metrics.go | 97 - .../plugin/ocgrpc/server_stats_handler.go | 63 - .../go.opencensus.io/plugin/ocgrpc/stats_common.go | 208 - .../go.opencensus.io/plugin/ocgrpc/trace_common.go | 107 - .../go.opencensus.io/plugin/ochttp/client.go | 117 - .../go.opencensus.io/plugin/ochttp/client_stats.go | 143 - 
.../vendor/go.opencensus.io/plugin/ochttp/doc.go | 19 - .../plugin/ochttp/propagation/b3/b3.go | 123 - .../ochttp/propagation/tracecontext/propagation.go | 187 - .../vendor/go.opencensus.io/plugin/ochttp/route.go | 61 - .../go.opencensus.io/plugin/ochttp/server.go | 440 - .../plugin/ochttp/span_annotating_client_trace.go | 169 - .../vendor/go.opencensus.io/plugin/ochttp/stats.go | 292 - .../vendor/go.opencensus.io/plugin/ochttp/trace.go | 239 - .../go.opencensus.io/plugin/ochttp/wrapped_body.go | 44 - .../vendor/go.opencensus.io/resource/resource.go | 164 - .../vendor/go.opencensus.io/stats/doc.go | 69 - .../go.opencensus.io/stats/internal/record.go | 25 - .../vendor/go.opencensus.io/stats/measure.go | 109 - .../go.opencensus.io/stats/measure_float64.go | 55 - .../vendor/go.opencensus.io/stats/measure_int64.go | 55 - .../vendor/go.opencensus.io/stats/record.go | 69 - .../vendor/go.opencensus.io/stats/units.go | 25 - .../go.opencensus.io/stats/view/aggregation.go | 120 - .../stats/view/aggregation_data.go | 293 - .../go.opencensus.io/stats/view/collector.go | 86 - .../vendor/go.opencensus.io/stats/view/doc.go | 47 - .../vendor/go.opencensus.io/stats/view/export.go | 58 - .../vendor/go.opencensus.io/stats/view/view.go | 221 - .../go.opencensus.io/stats/view/view_to_metric.go | 131 - .../vendor/go.opencensus.io/stats/view/worker.go | 279 - .../go.opencensus.io/stats/view/worker_commands.go | 182 - .../vendor/go.opencensus.io/tag/context.go | 43 - .../vendor/go.opencensus.io/tag/doc.go | 26 - .../vendor/go.opencensus.io/tag/key.go | 35 - .../vendor/go.opencensus.io/tag/map.go | 197 - .../vendor/go.opencensus.io/tag/map_codec.go | 237 - .../vendor/go.opencensus.io/tag/profile_19.go | 31 - .../vendor/go.opencensus.io/tag/profile_not19.go | 23 - .../vendor/go.opencensus.io/tag/validate.go | 56 - .../vendor/go.opencensus.io/trace/basetypes.go | 119 - .../vendor/go.opencensus.io/trace/config.go | 86 - .../vendor/go.opencensus.io/trace/doc.go | 53 - .../vendor/go.opencensus.io/trace/evictedqueue.go | 38 - .../vendor/go.opencensus.io/trace/export.go | 97 - .../go.opencensus.io/trace/internal/internal.go | 22 - .../vendor/go.opencensus.io/trace/lrumap.go | 37 - .../trace/propagation/propagation.go | 108 - .../vendor/go.opencensus.io/trace/sampling.go | 75 - .../vendor/go.opencensus.io/trace/spanbucket.go | 130 - .../vendor/go.opencensus.io/trace/spanstore.go | 306 - .../vendor/go.opencensus.io/trace/status_codes.go | 37 - .../vendor/go.opencensus.io/trace/trace.go | 598 - .../vendor/go.opencensus.io/trace/trace_go11.go | 32 - .../vendor/go.opencensus.io/trace/trace_nongo11.go | 25 - .../trace/tracestate/tracestate.go | 147 - .../vendor/go.uber.org/atomic/.codecov.yml | 15 - .../vendor/go.uber.org/atomic/.gitignore | 11 - .../vendor/go.uber.org/atomic/.travis.yml | 23 - .../vendor/go.uber.org/atomic/LICENSE.txt | 19 - .../vendor/go.uber.org/atomic/Makefile | 64 - .../vendor/go.uber.org/atomic/README.md | 36 - .../vendor/go.uber.org/atomic/atomic.go | 351 - .../vendor/go.uber.org/atomic/glide.lock | 17 - .../vendor/go.uber.org/atomic/glide.yaml | 6 - .../vendor/go.uber.org/atomic/string.go | 49 - .../vendor/go.uber.org/multierr/.codecov.yml | 15 - .../vendor/go.uber.org/multierr/.gitignore | 1 - .../vendor/go.uber.org/multierr/.travis.yml | 33 - .../vendor/go.uber.org/multierr/CHANGELOG.md | 28 - .../vendor/go.uber.org/multierr/LICENSE.txt | 19 - .../vendor/go.uber.org/multierr/Makefile | 74 - .../vendor/go.uber.org/multierr/README.md | 23 - .../vendor/go.uber.org/multierr/error.go | 401 - 
.../vendor/go.uber.org/multierr/glide.lock | 19 - .../vendor/go.uber.org/multierr/glide.yaml | 8 - .../vendor/go.uber.org/zap/.codecov.yml | 17 - cmd/bpa-operator/vendor/go.uber.org/zap/.gitignore | 28 - .../vendor/go.uber.org/zap/.readme.tmpl | 108 - .../vendor/go.uber.org/zap/.travis.yml | 21 - .../vendor/go.uber.org/zap/CHANGELOG.md | 305 - .../vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 - .../vendor/go.uber.org/zap/CONTRIBUTING.md | 81 - cmd/bpa-operator/vendor/go.uber.org/zap/FAQ.md | 155 - .../vendor/go.uber.org/zap/LICENSE.txt | 19 - cmd/bpa-operator/vendor/go.uber.org/zap/Makefile | 76 - cmd/bpa-operator/vendor/go.uber.org/zap/README.md | 136 - cmd/bpa-operator/vendor/go.uber.org/zap/array.go | 320 - .../vendor/go.uber.org/zap/buffer/buffer.go | 115 - .../vendor/go.uber.org/zap/buffer/pool.go | 49 - .../vendor/go.uber.org/zap/check_license.sh | 17 - cmd/bpa-operator/vendor/go.uber.org/zap/config.go | 243 - cmd/bpa-operator/vendor/go.uber.org/zap/doc.go | 113 - cmd/bpa-operator/vendor/go.uber.org/zap/encoder.go | 75 - cmd/bpa-operator/vendor/go.uber.org/zap/error.go | 80 - cmd/bpa-operator/vendor/go.uber.org/zap/field.go | 310 - cmd/bpa-operator/vendor/go.uber.org/zap/flag.go | 39 - cmd/bpa-operator/vendor/go.uber.org/zap/glide.lock | 76 - cmd/bpa-operator/vendor/go.uber.org/zap/glide.yaml | 35 - cmd/bpa-operator/vendor/go.uber.org/zap/global.go | 169 - .../vendor/go.uber.org/zap/http_handler.go | 81 - .../zap/internal/bufferpool/bufferpool.go | 31 - .../vendor/go.uber.org/zap/internal/color/color.go | 44 - .../vendor/go.uber.org/zap/internal/exit/exit.go | 64 - cmd/bpa-operator/vendor/go.uber.org/zap/level.go | 132 - cmd/bpa-operator/vendor/go.uber.org/zap/logger.go | 305 - cmd/bpa-operator/vendor/go.uber.org/zap/options.go | 109 - cmd/bpa-operator/vendor/go.uber.org/zap/sink.go | 161 - .../vendor/go.uber.org/zap/stacktrace.go | 126 - cmd/bpa-operator/vendor/go.uber.org/zap/sugar.go | 304 - cmd/bpa-operator/vendor/go.uber.org/zap/time.go | 27 - cmd/bpa-operator/vendor/go.uber.org/zap/writer.go | 99 - .../go.uber.org/zap/zapcore/console_encoder.go | 147 - .../vendor/go.uber.org/zap/zapcore/core.go | 113 - .../vendor/go.uber.org/zap/zapcore/doc.go | 24 - .../vendor/go.uber.org/zap/zapcore/encoder.go | 348 - .../vendor/go.uber.org/zap/zapcore/entry.go | 257 - .../vendor/go.uber.org/zap/zapcore/error.go | 120 - .../vendor/go.uber.org/zap/zapcore/field.go | 201 - .../vendor/go.uber.org/zap/zapcore/hook.go | 68 - .../vendor/go.uber.org/zap/zapcore/json_encoder.go | 502 - .../vendor/go.uber.org/zap/zapcore/level.go | 175 - .../go.uber.org/zap/zapcore/level_strings.go | 46 - .../vendor/go.uber.org/zap/zapcore/marshaler.go | 53 - .../go.uber.org/zap/zapcore/memory_encoder.go | 179 - .../vendor/go.uber.org/zap/zapcore/sampler.go | 134 - .../vendor/go.uber.org/zap/zapcore/tee.go | 81 - .../vendor/go.uber.org/zap/zapcore/write_syncer.go | 123 - .../vendor/golang.org/x/crypto/AUTHORS | 3 - .../vendor/golang.org/x/crypto/CONTRIBUTORS | 3 - .../vendor/golang.org/x/crypto/LICENSE | 27 - .../vendor/golang.org/x/crypto/PATENTS | 22 - .../golang.org/x/crypto/curve25519/const_amd64.h | 8 - .../golang.org/x/crypto/curve25519/const_amd64.s | 20 - .../golang.org/x/crypto/curve25519/cswap_amd64.s | 65 - .../golang.org/x/crypto/curve25519/curve25519.go | 834 - .../vendor/golang.org/x/crypto/curve25519/doc.go | 23 - .../golang.org/x/crypto/curve25519/freeze_amd64.s | 73 - .../x/crypto/curve25519/ladderstep_amd64.s | 1377 - .../x/crypto/curve25519/mont25519_amd64.go | 240 - 
.../golang.org/x/crypto/curve25519/mul_amd64.s | 169 - .../golang.org/x/crypto/curve25519/square_amd64.s | 132 - .../vendor/golang.org/x/crypto/ed25519/ed25519.go | 217 - .../crypto/ed25519/internal/edwards25519/const.go | 1422 - .../ed25519/internal/edwards25519/edwards25519.go | 1793 - .../x/crypto/internal/chacha20/asm_arm64.s | 308 - .../x/crypto/internal/chacha20/chacha_arm64.go | 31 - .../x/crypto/internal/chacha20/chacha_generic.go | 264 - .../x/crypto/internal/chacha20/chacha_noasm.go | 16 - .../x/crypto/internal/chacha20/chacha_s390x.go | 29 - .../x/crypto/internal/chacha20/chacha_s390x.s | 260 - .../golang.org/x/crypto/internal/chacha20/xor.go | 43 - .../x/crypto/internal/subtle/aliasing.go | 32 - .../x/crypto/internal/subtle/aliasing_appengine.go | 35 - .../golang.org/x/crypto/poly1305/mac_noasm.go | 11 - .../golang.org/x/crypto/poly1305/poly1305.go | 83 - .../golang.org/x/crypto/poly1305/sum_amd64.go | 68 - .../golang.org/x/crypto/poly1305/sum_amd64.s | 148 - .../vendor/golang.org/x/crypto/poly1305/sum_arm.go | 22 - .../vendor/golang.org/x/crypto/poly1305/sum_arm.s | 427 - .../golang.org/x/crypto/poly1305/sum_generic.go | 172 - .../golang.org/x/crypto/poly1305/sum_noasm.go | 16 - .../golang.org/x/crypto/poly1305/sum_s390x.go | 42 - .../golang.org/x/crypto/poly1305/sum_s390x.s | 378 - .../golang.org/x/crypto/poly1305/sum_vmsl_s390x.s | 909 - .../vendor/golang.org/x/crypto/ssh/buffer.go | 97 - .../vendor/golang.org/x/crypto/ssh/certs.go | 535 - .../vendor/golang.org/x/crypto/ssh/channel.go | 633 - .../vendor/golang.org/x/crypto/ssh/cipher.go | 770 - .../vendor/golang.org/x/crypto/ssh/client.go | 278 - .../vendor/golang.org/x/crypto/ssh/client_auth.go | 525 - .../vendor/golang.org/x/crypto/ssh/common.go | 383 - .../vendor/golang.org/x/crypto/ssh/connection.go | 143 - .../vendor/golang.org/x/crypto/ssh/doc.go | 21 - .../vendor/golang.org/x/crypto/ssh/handshake.go | 646 - .../vendor/golang.org/x/crypto/ssh/kex.go | 540 - .../vendor/golang.org/x/crypto/ssh/keys.go | 1100 - .../vendor/golang.org/x/crypto/ssh/mac.go | 61 - .../vendor/golang.org/x/crypto/ssh/messages.go | 766 - .../vendor/golang.org/x/crypto/ssh/mux.go | 330 - .../vendor/golang.org/x/crypto/ssh/server.go | 594 - .../vendor/golang.org/x/crypto/ssh/session.go | 647 - .../vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 - .../vendor/golang.org/x/crypto/ssh/tcpip.go | 474 - .../golang.org/x/crypto/ssh/terminal/terminal.go | 966 - .../golang.org/x/crypto/ssh/terminal/util.go | 114 - .../golang.org/x/crypto/ssh/terminal/util_aix.go | 12 - .../golang.org/x/crypto/ssh/terminal/util_bsd.go | 12 - .../golang.org/x/crypto/ssh/terminal/util_linux.go | 10 - .../golang.org/x/crypto/ssh/terminal/util_plan9.go | 58 - .../x/crypto/ssh/terminal/util_solaris.go | 124 - .../x/crypto/ssh/terminal/util_windows.go | 105 - .../vendor/golang.org/x/crypto/ssh/transport.go | 353 - cmd/bpa-operator/vendor/golang.org/x/net/AUTHORS | 3 - .../vendor/golang.org/x/net/CONTRIBUTORS | 3 - cmd/bpa-operator/vendor/golang.org/x/net/LICENSE | 27 - cmd/bpa-operator/vendor/golang.org/x/net/PATENTS | 22 - .../vendor/golang.org/x/net/context/context.go | 56 - .../golang.org/x/net/context/ctxhttp/ctxhttp.go | 71 - .../vendor/golang.org/x/net/context/go17.go | 72 - .../vendor/golang.org/x/net/context/go19.go | 20 - .../vendor/golang.org/x/net/context/pre_go17.go | 300 - .../vendor/golang.org/x/net/context/pre_go19.go | 109 - .../vendor/golang.org/x/net/http/httpguts/guts.go | 50 - .../golang.org/x/net/http/httpguts/httplex.go | 346 - 
.../vendor/golang.org/x/net/http2/.gitignore | 2 - .../vendor/golang.org/x/net/http2/Dockerfile | 51 - .../vendor/golang.org/x/net/http2/Makefile | 3 - .../vendor/golang.org/x/net/http2/README | 20 - .../vendor/golang.org/x/net/http2/ciphers.go | 641 - .../golang.org/x/net/http2/client_conn_pool.go | 282 - .../vendor/golang.org/x/net/http2/databuffer.go | 146 - .../vendor/golang.org/x/net/http2/errors.go | 133 - .../vendor/golang.org/x/net/http2/flow.go | 50 - .../vendor/golang.org/x/net/http2/frame.go | 1614 - .../vendor/golang.org/x/net/http2/go111.go | 29 - .../vendor/golang.org/x/net/http2/gotrack.go | 170 - .../vendor/golang.org/x/net/http2/headermap.go | 88 - .../vendor/golang.org/x/net/http2/hpack/encode.go | 240 - .../vendor/golang.org/x/net/http2/hpack/hpack.go | 504 - .../vendor/golang.org/x/net/http2/hpack/huffman.go | 222 - .../vendor/golang.org/x/net/http2/hpack/tables.go | 479 - .../vendor/golang.org/x/net/http2/http2.go | 384 - .../vendor/golang.org/x/net/http2/not_go111.go | 20 - .../vendor/golang.org/x/net/http2/pipe.go | 163 - .../vendor/golang.org/x/net/http2/server.go | 2895 - .../vendor/golang.org/x/net/http2/transport.go | 2607 - .../vendor/golang.org/x/net/http2/write.go | 365 - .../vendor/golang.org/x/net/http2/writesched.go | 242 - .../golang.org/x/net/http2/writesched_priority.go | 452 - .../golang.org/x/net/http2/writesched_random.go | 72 - .../vendor/golang.org/x/net/idna/idna.go | 732 - .../vendor/golang.org/x/net/idna/punycode.go | 203 - .../vendor/golang.org/x/net/idna/tables.go | 4557 -- .../vendor/golang.org/x/net/idna/trie.go | 72 - .../vendor/golang.org/x/net/idna/trieval.go | 119 - .../x/net/internal/timeseries/timeseries.go | 525 - .../vendor/golang.org/x/net/trace/events.go | 532 - .../vendor/golang.org/x/net/trace/histogram.go | 365 - .../vendor/golang.org/x/net/trace/trace.go | 1130 - .../vendor/golang.org/x/oauth2/.travis.yml | 13 - .../vendor/golang.org/x/oauth2/AUTHORS | 3 - .../vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 - .../vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 - .../vendor/golang.org/x/oauth2/LICENSE | 27 - .../vendor/golang.org/x/oauth2/README.md | 35 - cmd/bpa-operator/vendor/golang.org/x/oauth2/go.mod | 10 - cmd/bpa-operator/vendor/golang.org/x/oauth2/go.sum | 12 - .../vendor/golang.org/x/oauth2/google/appengine.go | 38 - .../golang.org/x/oauth2/google/appengine_gen1.go | 77 - .../x/oauth2/google/appengine_gen2_flex.go | 27 - .../vendor/golang.org/x/oauth2/google/default.go | 154 - .../vendor/golang.org/x/oauth2/google/doc.go | 40 - .../vendor/golang.org/x/oauth2/google/google.go | 202 - .../vendor/golang.org/x/oauth2/google/jwt.go | 74 - .../vendor/golang.org/x/oauth2/google/sdk.go | 201 - .../x/oauth2/internal/client_appengine.go | 13 - .../vendor/golang.org/x/oauth2/internal/doc.go | 6 - .../vendor/golang.org/x/oauth2/internal/oauth2.go | 37 - .../vendor/golang.org/x/oauth2/internal/token.go | 294 - .../golang.org/x/oauth2/internal/transport.go | 33 - .../vendor/golang.org/x/oauth2/jws/jws.go | 182 - .../vendor/golang.org/x/oauth2/jwt/jwt.go | 170 - .../vendor/golang.org/x/oauth2/oauth2.go | 381 - .../vendor/golang.org/x/oauth2/token.go | 178 - .../vendor/golang.org/x/oauth2/transport.go | 144 - cmd/bpa-operator/vendor/golang.org/x/sync/AUTHORS | 3 - .../vendor/golang.org/x/sync/CONTRIBUTORS | 3 - cmd/bpa-operator/vendor/golang.org/x/sync/LICENSE | 27 - cmd/bpa-operator/vendor/golang.org/x/sync/PATENTS | 22 - .../golang.org/x/sync/semaphore/semaphore.go | 127 - cmd/bpa-operator/vendor/golang.org/x/sys/AUTHORS | 3 - 
.../vendor/golang.org/x/sys/CONTRIBUTORS | 3 - cmd/bpa-operator/vendor/golang.org/x/sys/LICENSE | 27 - cmd/bpa-operator/vendor/golang.org/x/sys/PATENTS | 22 - .../vendor/golang.org/x/sys/cpu/byteorder.go | 30 - .../vendor/golang.org/x/sys/cpu/cpu.go | 126 - .../vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go | 30 - .../vendor/golang.org/x/sys/cpu/cpu_arm.go | 9 - .../vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 21 - .../vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 16 - .../vendor/golang.org/x/sys/cpu/cpu_gccgo.c | 43 - .../vendor/golang.org/x/sys/cpu/cpu_gccgo.go | 26 - .../vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 22 - .../vendor/golang.org/x/sys/cpu/cpu_linux.go | 59 - .../vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go | 67 - .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 33 - .../vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go | 161 - .../vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 11 - .../vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 11 - .../vendor/golang.org/x/sys/cpu/cpu_other_arm64.go | 11 - .../vendor/golang.org/x/sys/cpu/cpu_s390x.s | 57 - .../vendor/golang.org/x/sys/cpu/cpu_wasm.go | 15 - .../vendor/golang.org/x/sys/cpu/cpu_x86.go | 59 - .../vendor/golang.org/x/sys/cpu/cpu_x86.s | 27 - .../vendor/golang.org/x/sys/unix/.gitignore | 2 - .../vendor/golang.org/x/sys/unix/README.md | 173 - .../vendor/golang.org/x/sys/unix/affinity_linux.go | 124 - .../vendor/golang.org/x/sys/unix/aliases.go | 14 - .../vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 17 - .../vendor/golang.org/x/sys/unix/asm_darwin_386.s | 29 - .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 - .../vendor/golang.org/x/sys/unix/asm_darwin_arm.s | 30 - .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 - .../golang.org/x/sys/unix/asm_dragonfly_amd64.s | 29 - .../vendor/golang.org/x/sys/unix/asm_freebsd_386.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 - .../vendor/golang.org/x/sys/unix/asm_freebsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 - .../vendor/golang.org/x/sys/unix/asm_linux_386.s | 65 - .../vendor/golang.org/x/sys/unix/asm_linux_amd64.s | 57 - .../vendor/golang.org/x/sys/unix/asm_linux_arm.s | 56 - .../vendor/golang.org/x/sys/unix/asm_linux_arm64.s | 52 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 - .../vendor/golang.org/x/sys/unix/asm_linux_mipsx.s | 54 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 44 - .../vendor/golang.org/x/sys/unix/asm_linux_s390x.s | 56 - .../vendor/golang.org/x/sys/unix/asm_netbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 - .../vendor/golang.org/x/sys/unix/asm_netbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 - .../vendor/golang.org/x/sys/unix/asm_openbsd_386.s | 29 - .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 - .../vendor/golang.org/x/sys/unix/asm_openbsd_arm.s | 29 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 - .../golang.org/x/sys/unix/bluetooth_linux.go | 35 - .../vendor/golang.org/x/sys/unix/cap_freebsd.go | 195 - .../vendor/golang.org/x/sys/unix/constants.go | 13 - .../vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 27 - .../vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 29 - .../vendor/golang.org/x/sys/unix/dev_darwin.go | 24 - .../vendor/golang.org/x/sys/unix/dev_dragonfly.go | 30 - .../vendor/golang.org/x/sys/unix/dev_freebsd.go | 30 - .../vendor/golang.org/x/sys/unix/dev_linux.go | 42 - .../vendor/golang.org/x/sys/unix/dev_netbsd.go | 29 - .../vendor/golang.org/x/sys/unix/dev_openbsd.go | 29 - .../vendor/golang.org/x/sys/unix/dirent.go | 17 - 
.../vendor/golang.org/x/sys/unix/endian_big.go | 9 - .../vendor/golang.org/x/sys/unix/endian_little.go | 9 - .../vendor/golang.org/x/sys/unix/env_unix.go | 31 - .../golang.org/x/sys/unix/errors_freebsd_386.go | 227 - .../golang.org/x/sys/unix/errors_freebsd_amd64.go | 227 - .../golang.org/x/sys/unix/errors_freebsd_arm.go | 226 - .../vendor/golang.org/x/sys/unix/fcntl.go | 32 - .../vendor/golang.org/x/sys/unix/fcntl_darwin.go | 18 - .../golang.org/x/sys/unix/fcntl_linux_32bit.go | 13 - .../vendor/golang.org/x/sys/unix/gccgo.go | 62 - .../vendor/golang.org/x/sys/unix/gccgo_c.c | 39 - .../golang.org/x/sys/unix/gccgo_linux_amd64.go | 20 - .../vendor/golang.org/x/sys/unix/ioctl.go | 30 - .../vendor/golang.org/x/sys/unix/mkall.sh | 212 - .../vendor/golang.org/x/sys/unix/mkasm_darwin.go | 61 - .../vendor/golang.org/x/sys/unix/mkerrors.sh | 659 - .../vendor/golang.org/x/sys/unix/mkpost.go | 106 - .../vendor/golang.org/x/sys/unix/mksyscall.go | 407 - .../golang.org/x/sys/unix/mksyscall_aix_ppc.go | 404 - .../golang.org/x/sys/unix/mksyscall_aix_ppc64.go | 602 - .../golang.org/x/sys/unix/mksyscall_solaris.go | 335 - .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 265 - .../vendor/golang.org/x/sys/unix/mksysnum.go | 190 - .../vendor/golang.org/x/sys/unix/openbsd_pledge.go | 166 - .../vendor/golang.org/x/sys/unix/openbsd_unveil.go | 44 - .../vendor/golang.org/x/sys/unix/pagesize_unix.go | 15 - .../vendor/golang.org/x/sys/unix/race.go | 30 - .../vendor/golang.org/x/sys/unix/race0.go | 25 - .../vendor/golang.org/x/sys/unix/sockcmsg_linux.go | 36 - .../vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 117 - .../vendor/golang.org/x/sys/unix/str.go | 26 - .../vendor/golang.org/x/sys/unix/syscall.go | 54 - .../vendor/golang.org/x/sys/unix/syscall_aix.go | 549 - .../golang.org/x/sys/unix/syscall_aix_ppc.go | 34 - .../golang.org/x/sys/unix/syscall_aix_ppc64.go | 34 - .../vendor/golang.org/x/sys/unix/syscall_bsd.go | 624 - .../vendor/golang.org/x/sys/unix/syscall_darwin.go | 706 - .../golang.org/x/sys/unix/syscall_darwin_386.go | 63 - .../golang.org/x/sys/unix/syscall_darwin_amd64.go | 63 - .../golang.org/x/sys/unix/syscall_darwin_arm.go | 64 - .../golang.org/x/sys/unix/syscall_darwin_arm64.go | 66 - .../x/sys/unix/syscall_darwin_libSystem.go | 31 - .../golang.org/x/sys/unix/syscall_dragonfly.go | 539 - .../x/sys/unix/syscall_dragonfly_amd64.go | 52 - .../golang.org/x/sys/unix/syscall_freebsd.go | 824 - .../golang.org/x/sys/unix/syscall_freebsd_386.go | 52 - .../golang.org/x/sys/unix/syscall_freebsd_amd64.go | 52 - .../golang.org/x/sys/unix/syscall_freebsd_arm.go | 52 - .../golang.org/x/sys/unix/syscall_freebsd_arm64.go | 52 - .../vendor/golang.org/x/sys/unix/syscall_linux.go | 1771 - .../golang.org/x/sys/unix/syscall_linux_386.go | 386 - .../golang.org/x/sys/unix/syscall_linux_amd64.go | 190 - .../x/sys/unix/syscall_linux_amd64_gc.go | 13 - .../golang.org/x/sys/unix/syscall_linux_arm.go | 274 - .../golang.org/x/sys/unix/syscall_linux_arm64.go | 223 - .../golang.org/x/sys/unix/syscall_linux_gc.go | 14 - .../golang.org/x/sys/unix/syscall_linux_gc_386.go | 16 - .../x/sys/unix/syscall_linux_gccgo_386.go | 30 - .../x/sys/unix/syscall_linux_gccgo_arm.go | 20 - .../golang.org/x/sys/unix/syscall_linux_mips64x.go | 222 - .../golang.org/x/sys/unix/syscall_linux_mipsx.go | 234 - .../golang.org/x/sys/unix/syscall_linux_ppc64x.go | 152 - .../golang.org/x/sys/unix/syscall_linux_riscv64.go | 226 - .../golang.org/x/sys/unix/syscall_linux_s390x.go | 338 - .../golang.org/x/sys/unix/syscall_linux_sparc64.go | 147 - 
.../vendor/golang.org/x/sys/unix/syscall_netbsd.go | 622 - .../golang.org/x/sys/unix/syscall_netbsd_386.go | 33 - .../golang.org/x/sys/unix/syscall_netbsd_amd64.go | 33 - .../golang.org/x/sys/unix/syscall_netbsd_arm.go | 33 - .../golang.org/x/sys/unix/syscall_netbsd_arm64.go | 33 - .../golang.org/x/sys/unix/syscall_openbsd.go | 416 - .../golang.org/x/sys/unix/syscall_openbsd_386.go | 37 - .../golang.org/x/sys/unix/syscall_openbsd_amd64.go | 37 - .../golang.org/x/sys/unix/syscall_openbsd_arm.go | 37 - .../golang.org/x/sys/unix/syscall_solaris.go | 737 - .../golang.org/x/sys/unix/syscall_solaris_amd64.go | 23 - .../vendor/golang.org/x/sys/unix/syscall_unix.go | 431 - .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 24 - .../vendor/golang.org/x/sys/unix/timestruct.go | 82 - .../vendor/golang.org/x/sys/unix/types_aix.go | 236 - .../vendor/golang.org/x/sys/unix/types_darwin.go | 283 - .../golang.org/x/sys/unix/types_dragonfly.go | 263 - .../vendor/golang.org/x/sys/unix/types_freebsd.go | 356 - .../vendor/golang.org/x/sys/unix/types_netbsd.go | 289 - .../vendor/golang.org/x/sys/unix/types_openbsd.go | 282 - .../vendor/golang.org/x/sys/unix/types_solaris.go | 266 - .../vendor/golang.org/x/sys/unix/xattr_bsd.go | 240 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1372 - .../golang.org/x/sys/unix/zerrors_aix_ppc64.go | 1373 - .../golang.org/x/sys/unix/zerrors_darwin_386.go | 1783 - .../golang.org/x/sys/unix/zerrors_darwin_amd64.go | 1783 - .../golang.org/x/sys/unix/zerrors_darwin_arm.go | 1783 - .../golang.org/x/sys/unix/zerrors_darwin_arm64.go | 1783 - .../x/sys/unix/zerrors_dragonfly_amd64.go | 1650 - .../golang.org/x/sys/unix/zerrors_freebsd_386.go | 1793 - .../golang.org/x/sys/unix/zerrors_freebsd_amd64.go | 1794 - .../golang.org/x/sys/unix/zerrors_freebsd_arm.go | 1802 - .../golang.org/x/sys/unix/zerrors_freebsd_arm64.go | 1794 - .../golang.org/x/sys/unix/zerrors_linux_386.go | 2835 - .../golang.org/x/sys/unix/zerrors_linux_amd64.go | 2835 - .../golang.org/x/sys/unix/zerrors_linux_arm.go | 2841 - .../golang.org/x/sys/unix/zerrors_linux_arm64.go | 2826 - .../golang.org/x/sys/unix/zerrors_linux_mips.go | 2842 - .../golang.org/x/sys/unix/zerrors_linux_mips64.go | 2842 - .../x/sys/unix/zerrors_linux_mips64le.go | 2842 - .../golang.org/x/sys/unix/zerrors_linux_mipsle.go | 2842 - .../golang.org/x/sys/unix/zerrors_linux_ppc64.go | 2897 - .../golang.org/x/sys/unix/zerrors_linux_ppc64le.go | 2897 - .../golang.org/x/sys/unix/zerrors_linux_riscv64.go | 2822 - .../golang.org/x/sys/unix/zerrors_linux_s390x.go | 2895 - .../golang.org/x/sys/unix/zerrors_linux_sparc64.go | 2891 - .../golang.org/x/sys/unix/zerrors_netbsd_386.go | 1772 - .../golang.org/x/sys/unix/zerrors_netbsd_amd64.go | 1762 - .../golang.org/x/sys/unix/zerrors_netbsd_arm.go | 1751 - .../golang.org/x/sys/unix/zerrors_netbsd_arm64.go | 1762 - .../golang.org/x/sys/unix/zerrors_openbsd_386.go | 1654 - .../golang.org/x/sys/unix/zerrors_openbsd_amd64.go | 1765 - .../golang.org/x/sys/unix/zerrors_openbsd_arm.go | 1656 - .../golang.org/x/sys/unix/zerrors_solaris_amd64.go | 1532 - .../golang.org/x/sys/unix/zptrace386_linux.go | 80 - .../golang.org/x/sys/unix/zptracearm_linux.go | 41 - .../golang.org/x/sys/unix/zptracemips_linux.go | 50 - .../golang.org/x/sys/unix/zptracemipsle_linux.go | 50 - .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1450 - .../golang.org/x/sys/unix/zsyscall_aix_ppc64.go | 1416 - .../golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go | 1172 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go 
| 1051 - .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1810 - .../golang.org/x/sys/unix/zsyscall_darwin_386.go | 2505 - .../golang.org/x/sys/unix/zsyscall_darwin_386.s | 284 - .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1810 - .../golang.org/x/sys/unix/zsyscall_darwin_amd64.go | 2520 - .../golang.org/x/sys/unix/zsyscall_darwin_amd64.s | 286 - .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1793 - .../golang.org/x/sys/unix/zsyscall_darwin_arm.go | 2483 - .../golang.org/x/sys/unix/zsyscall_darwin_arm.s | 282 - .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 1793 - .../golang.org/x/sys/unix/zsyscall_darwin_arm64.go | 2483 - .../golang.org/x/sys/unix/zsyscall_darwin_arm64.s | 282 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1659 - .../golang.org/x/sys/unix/zsyscall_freebsd_386.go | 2015 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 2015 - .../golang.org/x/sys/unix/zsyscall_freebsd_arm.go | 2015 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 2015 - .../golang.org/x/sys/unix/zsyscall_linux_386.go | 2220 - .../golang.org/x/sys/unix/zsyscall_linux_amd64.go | 2387 - .../golang.org/x/sys/unix/zsyscall_linux_arm.go | 2342 - .../golang.org/x/sys/unix/zsyscall_linux_arm64.go | 2244 - .../golang.org/x/sys/unix/zsyscall_linux_mips.go | 2400 - .../golang.org/x/sys/unix/zsyscall_linux_mips64.go | 2371 - .../x/sys/unix/zsyscall_linux_mips64le.go | 2371 - .../golang.org/x/sys/unix/zsyscall_linux_mipsle.go | 2400 - .../golang.org/x/sys/unix/zsyscall_linux_ppc64.go | 2449 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 2449 - .../x/sys/unix/zsyscall_linux_riscv64.go | 2224 - .../golang.org/x/sys/unix/zsyscall_linux_s390x.go | 2219 - .../x/sys/unix/zsyscall_linux_sparc64.go | 2382 - .../golang.org/x/sys/unix/zsyscall_netbsd_386.go | 1826 - .../golang.org/x/sys/unix/zsyscall_netbsd_amd64.go | 1826 - .../golang.org/x/sys/unix/zsyscall_netbsd_arm.go | 1826 - .../golang.org/x/sys/unix/zsyscall_netbsd_arm64.go | 1826 - .../golang.org/x/sys/unix/zsyscall_openbsd_386.go | 1692 - .../x/sys/unix/zsyscall_openbsd_amd64.go | 1692 - .../golang.org/x/sys/unix/zsyscall_openbsd_arm.go | 1692 - .../x/sys/unix/zsyscall_solaris_amd64.go | 1953 - .../golang.org/x/sys/unix/zsysctl_openbsd_386.go | 270 - .../golang.org/x/sys/unix/zsysctl_openbsd_amd64.go | 270 - .../golang.org/x/sys/unix/zsysctl_openbsd_arm.go | 270 - .../golang.org/x/sys/unix/zsysnum_darwin_386.go | 436 - .../golang.org/x/sys/unix/zsysnum_darwin_amd64.go | 438 - .../golang.org/x/sys/unix/zsysnum_darwin_arm.go | 436 - .../golang.org/x/sys/unix/zsysnum_darwin_arm64.go | 436 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 - .../golang.org/x/sys/unix/zsysnum_freebsd_386.go | 403 - .../golang.org/x/sys/unix/zsysnum_freebsd_amd64.go | 403 - .../golang.org/x/sys/unix/zsysnum_freebsd_arm.go | 403 - .../golang.org/x/sys/unix/zsysnum_freebsd_arm64.go | 395 - .../golang.org/x/sys/unix/zsysnum_linux_386.go | 392 - .../golang.org/x/sys/unix/zsysnum_linux_amd64.go | 344 - .../golang.org/x/sys/unix/zsysnum_linux_arm.go | 364 - .../golang.org/x/sys/unix/zsysnum_linux_arm64.go | 289 - .../golang.org/x/sys/unix/zsysnum_linux_mips.go | 377 - .../golang.org/x/sys/unix/zsysnum_linux_mips64.go | 337 - .../x/sys/unix/zsysnum_linux_mips64le.go | 337 - .../golang.org/x/sys/unix/zsysnum_linux_mipsle.go | 377 - .../golang.org/x/sys/unix/zsysnum_linux_ppc64.go | 375 - .../golang.org/x/sys/unix/zsysnum_linux_ppc64le.go | 375 - .../golang.org/x/sys/unix/zsysnum_linux_riscv64.go | 288 - .../golang.org/x/sys/unix/zsysnum_linux_s390x.go | 337 - .../golang.org/x/sys/unix/zsysnum_linux_sparc64.go | 351 
- .../golang.org/x/sys/unix/zsysnum_netbsd_386.go | 274 - .../golang.org/x/sys/unix/zsysnum_netbsd_amd64.go | 274 - .../golang.org/x/sys/unix/zsysnum_netbsd_arm.go | 274 - .../golang.org/x/sys/unix/zsysnum_netbsd_arm64.go | 274 - .../golang.org/x/sys/unix/zsysnum_openbsd_386.go | 218 - .../golang.org/x/sys/unix/zsysnum_openbsd_amd64.go | 218 - .../golang.org/x/sys/unix/zsysnum_openbsd_arm.go | 218 - .../vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go | 345 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 354 - .../golang.org/x/sys/unix/ztypes_darwin_386.go | 499 - .../golang.org/x/sys/unix/ztypes_darwin_amd64.go | 509 - .../golang.org/x/sys/unix/ztypes_darwin_arm.go | 500 - .../golang.org/x/sys/unix/ztypes_darwin_arm64.go | 509 - .../x/sys/unix/ztypes_dragonfly_amd64.go | 469 - .../golang.org/x/sys/unix/ztypes_freebsd_386.go | 603 - .../golang.org/x/sys/unix/ztypes_freebsd_amd64.go | 602 - .../golang.org/x/sys/unix/ztypes_freebsd_arm.go | 602 - .../golang.org/x/sys/unix/ztypes_freebsd_arm64.go | 602 - .../golang.org/x/sys/unix/ztypes_linux_386.go | 2083 - .../golang.org/x/sys/unix/ztypes_linux_amd64.go | 2096 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2074 - .../golang.org/x/sys/unix/ztypes_linux_arm64.go | 2075 - .../golang.org/x/sys/unix/ztypes_linux_mips.go | 2080 - .../golang.org/x/sys/unix/ztypes_linux_mips64.go | 2077 - .../golang.org/x/sys/unix/ztypes_linux_mips64le.go | 2077 - .../golang.org/x/sys/unix/ztypes_linux_mipsle.go | 2080 - .../golang.org/x/sys/unix/ztypes_linux_ppc64.go | 2085 - .../golang.org/x/sys/unix/ztypes_linux_ppc64le.go | 2085 - .../golang.org/x/sys/unix/ztypes_linux_riscv64.go | 2102 - .../golang.org/x/sys/unix/ztypes_linux_s390x.go | 2099 - .../golang.org/x/sys/unix/ztypes_linux_sparc64.go | 2080 - .../golang.org/x/sys/unix/ztypes_netbsd_386.go | 465 - .../golang.org/x/sys/unix/ztypes_netbsd_amd64.go | 472 - .../golang.org/x/sys/unix/ztypes_netbsd_arm.go | 470 - .../golang.org/x/sys/unix/ztypes_netbsd_arm64.go | 472 - .../golang.org/x/sys/unix/ztypes_openbsd_386.go | 570 - .../golang.org/x/sys/unix/ztypes_openbsd_amd64.go | 570 - .../golang.org/x/sys/unix/ztypes_openbsd_arm.go | 571 - .../golang.org/x/sys/unix/ztypes_solaris_amd64.go | 442 - .../vendor/golang.org/x/sys/windows/aliases.go | 13 - .../golang.org/x/sys/windows/asm_windows_386.s | 13 - .../golang.org/x/sys/windows/asm_windows_amd64.s | 13 - .../golang.org/x/sys/windows/asm_windows_arm.s | 11 - .../vendor/golang.org/x/sys/windows/dll_windows.go | 378 - .../vendor/golang.org/x/sys/windows/env_windows.go | 29 - .../vendor/golang.org/x/sys/windows/eventlog.go | 20 - .../golang.org/x/sys/windows/exec_windows.go | 97 - .../golang.org/x/sys/windows/memory_windows.go | 26 - .../vendor/golang.org/x/sys/windows/mksyscall.go | 7 - .../vendor/golang.org/x/sys/windows/race.go | 30 - .../vendor/golang.org/x/sys/windows/race0.go | 25 - .../golang.org/x/sys/windows/security_windows.go | 649 - .../vendor/golang.org/x/sys/windows/service.go | 183 - .../vendor/golang.org/x/sys/windows/str.go | 22 - .../vendor/golang.org/x/sys/windows/syscall.go | 74 - .../golang.org/x/sys/windows/syscall_windows.go | 1219 - .../golang.org/x/sys/windows/types_windows.go | 1479 - .../golang.org/x/sys/windows/types_windows_386.go | 22 - .../x/sys/windows/types_windows_amd64.go | 22 - .../golang.org/x/sys/windows/types_windows_arm.go | 22 - .../golang.org/x/sys/windows/zsyscall_windows.go | 2766 - cmd/bpa-operator/vendor/golang.org/x/text/AUTHORS | 3 - .../vendor/golang.org/x/text/CONTRIBUTORS | 3 - 
cmd/bpa-operator/vendor/golang.org/x/text/LICENSE | 27 - cmd/bpa-operator/vendor/golang.org/x/text/PATENTS | 22 - .../golang.org/x/text/secure/bidirule/bidirule.go | 336 - .../x/text/secure/bidirule/bidirule10.0.0.go | 11 - .../x/text/secure/bidirule/bidirule9.0.0.go | 14 - .../golang.org/x/text/transform/transform.go | 705 - .../vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 - .../golang.org/x/text/unicode/bidi/bracket.go | 335 - .../vendor/golang.org/x/text/unicode/bidi/core.go | 1058 - .../vendor/golang.org/x/text/unicode/bidi/gen.go | 133 - .../golang.org/x/text/unicode/bidi/gen_ranges.go | 57 - .../golang.org/x/text/unicode/bidi/gen_trieval.go | 64 - .../vendor/golang.org/x/text/unicode/bidi/prop.go | 206 - .../golang.org/x/text/unicode/bidi/tables10.0.0.go | 1815 - .../golang.org/x/text/unicode/bidi/tables9.0.0.go | 1781 - .../golang.org/x/text/unicode/bidi/trieval.go | 60 - .../golang.org/x/text/unicode/norm/composition.go | 508 - .../golang.org/x/text/unicode/norm/forminfo.go | 273 - .../vendor/golang.org/x/text/unicode/norm/input.go | 109 - .../vendor/golang.org/x/text/unicode/norm/iter.go | 457 - .../golang.org/x/text/unicode/norm/maketables.go | 986 - .../golang.org/x/text/unicode/norm/normalize.go | 609 - .../golang.org/x/text/unicode/norm/readwriter.go | 125 - .../golang.org/x/text/unicode/norm/tables10.0.0.go | 7657 --- .../golang.org/x/text/unicode/norm/tables9.0.0.go | 7637 --- .../golang.org/x/text/unicode/norm/transform.go | 88 - .../vendor/golang.org/x/text/unicode/norm/trie.go | 54 - .../golang.org/x/text/unicode/norm/triegen.go | 117 - .../vendor/golang.org/x/text/width/gen.go | 115 - .../vendor/golang.org/x/text/width/gen_common.go | 96 - .../vendor/golang.org/x/text/width/gen_trieval.go | 34 - .../vendor/golang.org/x/text/width/kind_string.go | 16 - .../vendor/golang.org/x/text/width/tables10.0.0.go | 1318 - .../vendor/golang.org/x/text/width/tables9.0.0.go | 1286 - .../vendor/golang.org/x/text/width/transform.go | 239 - .../vendor/golang.org/x/text/width/trieval.go | 30 - .../vendor/golang.org/x/text/width/width.go | 206 - cmd/bpa-operator/vendor/golang.org/x/time/AUTHORS | 3 - .../vendor/golang.org/x/time/CONTRIBUTORS | 3 - cmd/bpa-operator/vendor/golang.org/x/time/LICENSE | 27 - cmd/bpa-operator/vendor/golang.org/x/time/PATENTS | 22 - .../vendor/golang.org/x/time/rate/rate.go | 374 - cmd/bpa-operator/vendor/golang.org/x/tools/AUTHORS | 3 - .../vendor/golang.org/x/tools/CONTRIBUTORS | 3 - cmd/bpa-operator/vendor/golang.org/x/tools/LICENSE | 27 - cmd/bpa-operator/vendor/golang.org/x/tools/PATENTS | 22 - .../golang.org/x/tools/go/ast/astutil/enclosing.go | 627 - .../golang.org/x/tools/go/ast/astutil/imports.go | 481 - .../golang.org/x/tools/go/ast/astutil/rewrite.go | 477 - .../golang.org/x/tools/go/ast/astutil/util.go | 14 - .../x/tools/go/gcexportdata/gcexportdata.go | 109 - .../golang.org/x/tools/go/gcexportdata/importer.go | 73 - .../golang.org/x/tools/go/gcexportdata/main.go | 99 - .../x/tools/go/internal/gcimporter/bexport.go | 852 - .../x/tools/go/internal/gcimporter/bimport.go | 1036 - .../x/tools/go/internal/gcimporter/exportdata.go | 93 - .../x/tools/go/internal/gcimporter/gcimporter.go | 1078 - .../x/tools/go/internal/gcimporter/iexport.go | 723 - .../x/tools/go/internal/gcimporter/iimport.go | 606 - .../tools/go/internal/gcimporter/newInterface10.go | 21 - .../tools/go/internal/gcimporter/newInterface11.go | 13 - .../x/tools/go/internal/packagesdriver/sizes.go | 160 - .../vendor/golang.org/x/tools/go/packages/doc.go | 222 - 
.../golang.org/x/tools/go/packages/external.go | 79 - .../golang.org/x/tools/go/packages/golist.go | 828 - .../x/tools/go/packages/golist_overlay.go | 104 - .../golang.org/x/tools/go/packages/packages.go | 1059 - .../vendor/golang.org/x/tools/go/packages/visit.go | 55 - .../vendor/golang.org/x/tools/imports/fix.go | 1259 - .../vendor/golang.org/x/tools/imports/imports.go | 315 - .../vendor/golang.org/x/tools/imports/mkindex.go | 173 - .../vendor/golang.org/x/tools/imports/mkstdlib.go | 132 - .../vendor/golang.org/x/tools/imports/mod.go | 355 - .../golang.org/x/tools/imports/sortimports.go | 230 - .../vendor/golang.org/x/tools/imports/zstdlib.go | 10325 ---- .../x/tools/internal/fastwalk/fastwalk.go | 196 - .../internal/fastwalk/fastwalk_dirent_fileno.go | 13 - .../tools/internal/fastwalk/fastwalk_dirent_ino.go | 14 - .../fastwalk/fastwalk_dirent_namlen_bsd.go | 13 - .../fastwalk/fastwalk_dirent_namlen_linux.go | 29 - .../x/tools/internal/fastwalk/fastwalk_portable.go | 37 - .../x/tools/internal/fastwalk/fastwalk_unix.go | 127 - .../golang.org/x/tools/internal/gopathwalk/walk.go | 250 - .../golang.org/x/tools/internal/module/module.go | 540 - .../golang.org/x/tools/internal/semver/semver.go | 388 - .../vendor/google.golang.org/api/AUTHORS | 10 - .../vendor/google.golang.org/api/CONTRIBUTORS | 55 - .../vendor/google.golang.org/api/LICENSE | 27 - .../api/support/bundler/bundler.go | 349 - .../vendor/google.golang.org/appengine/.travis.yml | 20 - .../google.golang.org/appengine/CONTRIBUTING.md | 90 - .../vendor/google.golang.org/appengine/LICENSE | 202 - .../vendor/google.golang.org/appengine/README.md | 73 - .../google.golang.org/appengine/appengine.go | 135 - .../google.golang.org/appengine/appengine_vm.go | 20 - .../vendor/google.golang.org/appengine/errors.go | 46 - .../vendor/google.golang.org/appengine/go.mod | 7 - .../vendor/google.golang.org/appengine/go.sum | 6 - .../vendor/google.golang.org/appengine/identity.go | 142 - .../google.golang.org/appengine/internal/api.go | 675 - .../appengine/internal/api_classic.go | 169 - .../appengine/internal/api_common.go | 123 - .../google.golang.org/appengine/internal/app_id.go | 28 - .../app_identity/app_identity_service.pb.go | 611 - .../app_identity/app_identity_service.proto | 64 - .../appengine/internal/base/api_base.pb.go | 308 - .../appengine/internal/base/api_base.proto | 33 - .../internal/datastore/datastore_v3.pb.go | 4367 -- .../internal/datastore/datastore_v3.proto | 551 - .../appengine/internal/identity.go | 55 - .../appengine/internal/identity_classic.go | 61 - .../appengine/internal/identity_flex.go | 11 - .../appengine/internal/identity_vm.go | 134 - .../appengine/internal/internal.go | 110 - .../appengine/internal/log/log_service.pb.go | 1313 - .../appengine/internal/log/log_service.proto | 150 - .../google.golang.org/appengine/internal/main.go | 16 - .../appengine/internal/main_common.go | 7 - .../appengine/internal/main_vm.go | 69 - .../appengine/internal/metadata.go | 60 - .../internal/modules/modules_service.pb.go | 786 - .../internal/modules/modules_service.proto | 80 - .../google.golang.org/appengine/internal/net.go | 56 - .../google.golang.org/appengine/internal/regen.sh | 40 - .../appengine/internal/remote_api/remote_api.pb.go | 361 - .../appengine/internal/remote_api/remote_api.proto | 44 - .../appengine/internal/transaction.go | 115 - .../internal/urlfetch/urlfetch_service.pb.go | 527 - .../internal/urlfetch/urlfetch_service.proto | 64 - .../google.golang.org/appengine/namespace.go | 25 - 
.../vendor/google.golang.org/appengine/timeout.go | 20 - .../google.golang.org/appengine/travis_install.sh | 18 - .../google.golang.org/appengine/travis_test.sh | 12 - .../appengine/urlfetch/urlfetch.go | 210 - .../vendor/google.golang.org/genproto/LICENSE | 202 - .../googleapis/api/httpbody/httpbody.pb.go | 142 - .../genproto/googleapis/rpc/status/status.pb.go | 159 - .../genproto/protobuf/field_mask/field_mask.pb.go | 280 - .../vendor/google.golang.org/grpc/.travis.yml | 39 - .../vendor/google.golang.org/grpc/AUTHORS | 1 - .../vendor/google.golang.org/grpc/CONTRIBUTING.md | 37 - .../vendor/google.golang.org/grpc/LICENSE | 202 - .../vendor/google.golang.org/grpc/Makefile | 60 - .../vendor/google.golang.org/grpc/README.md | 67 - .../vendor/google.golang.org/grpc/backoff.go | 38 - .../vendor/google.golang.org/grpc/balancer.go | 391 - .../google.golang.org/grpc/balancer/balancer.go | 304 - .../grpc/balancer/base/balancer.go | 171 - .../google.golang.org/grpc/balancer/base/base.go | 64 - .../grpc/balancer/roundrobin/roundrobin.go | 83 - .../grpc/balancer_conn_wrappers.go | 328 - .../google.golang.org/grpc/balancer_v1_wrapper.go | 341 - .../binarylog/grpc_binarylog_v1/binarylog.pb.go | 900 - .../vendor/google.golang.org/grpc/call.go | 74 - .../vendor/google.golang.org/grpc/clientconn.go | 1446 - .../vendor/google.golang.org/grpc/codec.go | 50 - .../vendor/google.golang.org/grpc/codegen.sh | 17 - .../google.golang.org/grpc/codes/code_string.go | 62 - .../vendor/google.golang.org/grpc/codes/codes.go | 197 - .../grpc/connectivity/connectivity.go | 73 - .../grpc/credentials/credentials.go | 328 - .../grpc/credentials/internal/syscallconn.go | 61 - .../credentials/internal/syscallconn_appengine.go | 30 - .../google.golang.org/grpc/credentials/tls13.go | 30 - .../vendor/google.golang.org/grpc/dialoptions.go | 502 - .../vendor/google.golang.org/grpc/doc.go | 24 - .../google.golang.org/grpc/encoding/encoding.go | 118 - .../google.golang.org/grpc/encoding/proto/proto.go | 110 - .../vendor/google.golang.org/grpc/go.mod | 20 - .../vendor/google.golang.org/grpc/go.sum | 32 - .../google.golang.org/grpc/grpclog/grpclog.go | 126 - .../google.golang.org/grpc/grpclog/logger.go | 85 - .../google.golang.org/grpc/grpclog/loggerv2.go | 195 - .../vendor/google.golang.org/grpc/install_gae.sh | 6 - .../vendor/google.golang.org/grpc/interceptor.go | 77 - .../grpc/internal/backoff/backoff.go | 78 - .../grpc/internal/binarylog/binarylog.go | 167 - .../grpc/internal/binarylog/binarylog_testutil.go | 42 - .../grpc/internal/binarylog/env_config.go | 210 - .../grpc/internal/binarylog/method_logger.go | 423 - .../grpc/internal/binarylog/regenerate.sh | 33 - .../grpc/internal/binarylog/sink.go | 162 - .../grpc/internal/binarylog/util.go | 41 - .../grpc/internal/channelz/funcs.go | 699 - .../grpc/internal/channelz/types.go | 702 - .../grpc/internal/channelz/types_linux.go | 53 - .../grpc/internal/channelz/types_nonlinux.go | 44 - .../grpc/internal/channelz/util_linux.go | 39 - .../grpc/internal/channelz/util_nonlinux.go | 26 - .../grpc/internal/envconfig/envconfig.go | 71 - .../grpc/internal/grpcrand/grpcrand.go | 56 - .../grpc/internal/grpcsync/event.go | 61 - .../google.golang.org/grpc/internal/internal.go | 54 - .../grpc/internal/syscall/syscall_linux.go | 114 - .../grpc/internal/syscall/syscall_nonlinux.go | 63 - .../grpc/internal/transport/bdp_estimator.go | 141 - .../grpc/internal/transport/controlbuf.go | 852 - .../grpc/internal/transport/defaults.go | 49 - .../grpc/internal/transport/flowcontrol.go | 218 - 
.../grpc/internal/transport/handler_server.go | 449 - .../grpc/internal/transport/http2_client.go | 1382 - .../grpc/internal/transport/http2_server.go | 1209 - .../grpc/internal/transport/http_util.go | 623 - .../grpc/internal/transport/log.go | 44 - .../grpc/internal/transport/transport.go | 758 - .../google.golang.org/grpc/keepalive/keepalive.go | 85 - .../google.golang.org/grpc/metadata/metadata.go | 209 - .../google.golang.org/grpc/naming/dns_resolver.go | 293 - .../vendor/google.golang.org/grpc/naming/naming.go | 69 - .../vendor/google.golang.org/grpc/peer/peer.go | 51 - .../google.golang.org/grpc/picker_wrapper.go | 184 - .../vendor/google.golang.org/grpc/pickfirst.go | 110 - .../vendor/google.golang.org/grpc/proxy.go | 152 - .../grpc/resolver/dns/dns_resolver.go | 436 - .../grpc/resolver/passthrough/passthrough.go | 57 - .../google.golang.org/grpc/resolver/resolver.go | 158 - .../grpc/resolver_conn_wrapper.go | 155 - .../vendor/google.golang.org/grpc/rpc_util.go | 843 - .../vendor/google.golang.org/grpc/server.go | 1491 - .../google.golang.org/grpc/service_config.go | 372 - .../google.golang.org/grpc/stats/handlers.go | 63 - .../vendor/google.golang.org/grpc/stats/stats.go | 295 - .../vendor/google.golang.org/grpc/status/status.go | 210 - .../vendor/google.golang.org/grpc/stream.go | 1485 - .../vendor/google.golang.org/grpc/tap/tap.go | 51 - .../vendor/google.golang.org/grpc/trace.go | 113 - .../vendor/google.golang.org/grpc/version.go | 22 - .../vendor/google.golang.org/grpc/vet.sh | 123 - cmd/bpa-operator/vendor/gopkg.in/inf.v0/LICENSE | 28 - cmd/bpa-operator/vendor/gopkg.in/inf.v0/dec.go | 615 - cmd/bpa-operator/vendor/gopkg.in/inf.v0/rounder.go | 145 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/.gitignore | 6 - .../vendor/gopkg.in/ini.v1/.travis.yml | 19 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/LICENSE | 191 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/Makefile | 15 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/README.md | 54 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/error.go | 34 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/file.go | 418 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/ini.go | 223 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/key.go | 753 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/parser.go | 487 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/section.go | 256 - cmd/bpa-operator/vendor/gopkg.in/ini.v1/struct.go | 563 - .../vendor/gopkg.in/yaml.v2/.travis.yml | 12 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/LICENSE | 201 - .../vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/NOTICE | 13 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/README.md | 133 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/apic.go | 739 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/decode.go | 775 - .../vendor/gopkg.in/yaml.v2/emitterc.go | 1685 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/encode.go | 390 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/go.mod | 5 - .../vendor/gopkg.in/yaml.v2/parserc.go | 1095 - .../vendor/gopkg.in/yaml.v2/readerc.go | 412 - .../vendor/gopkg.in/yaml.v2/resolve.go | 258 - .../vendor/gopkg.in/yaml.v2/scannerc.go | 2696 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/sorter.go | 113 - .../vendor/gopkg.in/yaml.v2/writerc.go | 26 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/yaml.go | 466 - cmd/bpa-operator/vendor/gopkg.in/yaml.v2/yamlh.go | 738 - .../vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 - cmd/bpa-operator/vendor/k8s.io/api/LICENSE | 202 - .../vendor/k8s.io/api/admission/v1beta1/doc.go | 22 - .../k8s.io/api/admission/v1beta1/generated.pb.go | 1390 - 
.../k8s.io/api/admission/v1beta1/generated.proto | 123 - .../k8s.io/api/admission/v1beta1/register.go | 51 - .../vendor/k8s.io/api/admission/v1beta1/types.go | 127 - .../v1beta1/types_swagger_doc_generated.go | 73 - .../api/admission/v1beta1/zz_generated.deepcopy.go | 125 - .../api/admissionregistration/v1alpha1/doc.go | 25 - .../admissionregistration/v1alpha1/generated.pb.go | 1008 - .../admissionregistration/v1alpha1/generated.proto | 107 - .../api/admissionregistration/v1alpha1/register.go | 51 - .../api/admissionregistration/v1alpha1/types.go | 106 - .../v1alpha1/types_swagger_doc_generated.go | 71 - .../v1alpha1/zz_generated.deepcopy.go | 145 - .../api/admissionregistration/v1beta1/doc.go | 25 - .../admissionregistration/v1beta1/generated.pb.go | 2171 - .../admissionregistration/v1beta1/generated.proto | 269 - .../api/admissionregistration/v1beta1/register.go | 53 - .../api/admissionregistration/v1beta1/types.go | 306 - .../v1beta1/types_swagger_doc_generated.go | 126 - .../v1beta1/zz_generated.deepcopy.go | 302 - cmd/bpa-operator/vendor/k8s.io/api/apps/v1/doc.go | 20 - .../vendor/k8s.io/api/apps/v1/generated.pb.go | 6924 --- .../vendor/k8s.io/api/apps/v1/generated.proto | 701 - .../vendor/k8s.io/api/apps/v1/register.go | 60 - .../vendor/k8s.io/api/apps/v1/types.go | 826 - .../api/apps/v1/types_swagger_doc_generated.go | 365 - .../k8s.io/api/apps/v1/zz_generated.deepcopy.go | 772 - .../vendor/k8s.io/api/apps/v1beta1/doc.go | 20 - .../vendor/k8s.io/api/apps/v1beta1/generated.pb.go | 5275 -- .../vendor/k8s.io/api/apps/v1beta1/generated.proto | 484 - .../vendor/k8s.io/api/apps/v1beta1/register.go | 58 - .../vendor/k8s.io/api/apps/v1beta1/types.go | 567 - .../apps/v1beta1/types_swagger_doc_generated.go | 273 - .../api/apps/v1beta1/zz_generated.deepcopy.go | 594 - .../vendor/k8s.io/api/apps/v1beta2/doc.go | 20 - .../vendor/k8s.io/api/apps/v1beta2/generated.pb.go | 7567 --- .../vendor/k8s.io/api/apps/v1beta2/generated.proto | 752 - .../vendor/k8s.io/api/apps/v1beta2/register.go | 61 - .../vendor/k8s.io/api/apps/v1beta2/types.go | 876 - .../apps/v1beta2/types_swagger_doc_generated.go | 396 - .../api/apps/v1beta2/zz_generated.deepcopy.go | 839 - .../k8s.io/api/auditregistration/v1alpha1/doc.go | 22 - .../api/auditregistration/v1alpha1/generated.pb.go | 1685 - .../api/auditregistration/v1alpha1/generated.proto | 158 - .../api/auditregistration/v1alpha1/register.go | 56 - .../k8s.io/api/auditregistration/v1alpha1/types.go | 194 - .../v1alpha1/types_swagger_doc_generated.go | 110 - .../v1alpha1/zz_generated.deepcopy.go | 224 - .../vendor/k8s.io/api/authentication/v1/doc.go | 21 - .../k8s.io/api/authentication/v1/generated.pb.go | 2233 - .../k8s.io/api/authentication/v1/generated.proto | 179 - .../k8s.io/api/authentication/v1/register.go | 52 - .../vendor/k8s.io/api/authentication/v1/types.go | 186 - .../v1/types_swagger_doc_generated.go | 115 - .../api/authentication/v1/zz_generated.deepcopy.go | 244 - .../k8s.io/api/authentication/v1beta1/doc.go | 21 - .../api/authentication/v1beta1/generated.pb.go | 1388 - .../api/authentication/v1beta1/generated.proto | 118 - .../k8s.io/api/authentication/v1beta1/register.go | 51 - .../k8s.io/api/authentication/v1beta1/types.go | 110 - .../v1beta1/types_swagger_doc_generated.go | 74 - .../v1beta1/zz_generated.deepcopy.go | 152 - .../vendor/k8s.io/api/authorization/v1/doc.go | 22 - .../k8s.io/api/authorization/v1/generated.pb.go | 3511 -- .../k8s.io/api/authorization/v1/generated.proto | 272 - .../vendor/k8s.io/api/authorization/v1/register.go | 55 - 
.../vendor/k8s.io/api/authorization/v1/types.go | 268 - .../v1/types_swagger_doc_generated.go | 173 - .../api/authorization/v1/zz_generated.deepcopy.go | 385 - .../vendor/k8s.io/api/authorization/v1beta1/doc.go | 22 - .../api/authorization/v1beta1/generated.pb.go | 3511 -- .../api/authorization/v1beta1/generated.proto | 272 - .../k8s.io/api/authorization/v1beta1/register.go | 55 - .../k8s.io/api/authorization/v1beta1/types.go | 268 - .../v1beta1/types_swagger_doc_generated.go | 173 - .../authorization/v1beta1/zz_generated.deepcopy.go | 385 - .../vendor/k8s.io/api/autoscaling/v1/doc.go | 20 - .../k8s.io/api/autoscaling/v1/generated.pb.go | 4691 -- .../k8s.io/api/autoscaling/v1/generated.proto | 415 - .../vendor/k8s.io/api/autoscaling/v1/register.go | 53 - .../vendor/k8s.io/api/autoscaling/v1/types.go | 428 - .../autoscaling/v1/types_swagger_doc_generated.go | 250 - .../api/autoscaling/v1/zz_generated.deepcopy.go | 515 - .../vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 20 - .../k8s.io/api/autoscaling/v2beta1/generated.pb.go | 4307 -- .../k8s.io/api/autoscaling/v2beta1/generated.proto | 397 - .../k8s.io/api/autoscaling/v2beta1/register.go | 52 - .../vendor/k8s.io/api/autoscaling/v2beta1/types.go | 405 - .../v2beta1/types_swagger_doc_generated.go | 221 - .../autoscaling/v2beta1/zz_generated.deepcopy.go | 466 - .../vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 20 - .../k8s.io/api/autoscaling/v2beta2/generated.pb.go | 4419 -- .../k8s.io/api/autoscaling/v2beta2/generated.proto | 369 - .../k8s.io/api/autoscaling/v2beta2/register.go | 50 - .../vendor/k8s.io/api/autoscaling/v2beta2/types.go | 393 - .../v2beta2/types_swagger_doc_generated.go | 240 - .../autoscaling/v2beta2/zz_generated.deepcopy.go | 487 - cmd/bpa-operator/vendor/k8s.io/api/batch/v1/doc.go | 20 - .../vendor/k8s.io/api/batch/v1/generated.pb.go | 1627 - .../vendor/k8s.io/api/batch/v1/generated.proto | 184 - .../vendor/k8s.io/api/batch/v1/register.go | 52 - .../vendor/k8s.io/api/batch/v1/types.go | 193 - .../api/batch/v1/types_swagger_doc_generated.go | 95 - .../k8s.io/api/batch/v1/zz_generated.deepcopy.go | 188 - .../vendor/k8s.io/api/batch/v1beta1/doc.go | 20 - .../k8s.io/api/batch/v1beta1/generated.pb.go | 1490 - .../k8s.io/api/batch/v1beta1/generated.proto | 137 - .../vendor/k8s.io/api/batch/v1beta1/register.go | 53 - .../vendor/k8s.io/api/batch/v1beta1/types.go | 158 - .../batch/v1beta1/types_swagger_doc_generated.go | 96 - .../api/batch/v1beta1/zz_generated.deepcopy.go | 194 - .../vendor/k8s.io/api/batch/v2alpha1/doc.go | 20 - .../k8s.io/api/batch/v2alpha1/generated.pb.go | 1490 - .../k8s.io/api/batch/v2alpha1/generated.proto | 135 - .../vendor/k8s.io/api/batch/v2alpha1/register.go | 53 - .../vendor/k8s.io/api/batch/v2alpha1/types.go | 156 - .../batch/v2alpha1/types_swagger_doc_generated.go | 96 - .../api/batch/v2alpha1/zz_generated.deepcopy.go | 194 - .../vendor/k8s.io/api/certificates/v1beta1/doc.go | 22 - .../api/certificates/v1beta1/generated.pb.go | 1676 - .../api/certificates/v1beta1/generated.proto | 121 - .../k8s.io/api/certificates/v1beta1/register.go | 59 - .../k8s.io/api/certificates/v1beta1/types.go | 155 - .../v1beta1/types_swagger_doc_generated.go | 74 - .../certificates/v1beta1/zz_generated.deepcopy.go | 197 - .../vendor/k8s.io/api/coordination/v1beta1/doc.go | 22 - .../api/coordination/v1beta1/generated.pb.go | 864 - .../api/coordination/v1beta1/generated.proto | 80 - .../k8s.io/api/coordination/v1beta1/register.go | 53 - .../k8s.io/api/coordination/v1beta1/types.go | 74 - 
.../v1beta1/types_swagger_doc_generated.go | 63 - .../coordination/v1beta1/zz_generated.deepcopy.go | 124 - .../k8s.io/api/core/v1/annotation_key_constants.go | 100 - cmd/bpa-operator/vendor/k8s.io/api/core/v1/doc.go | 21 - .../vendor/k8s.io/api/core/v1/generated.pb.go | 52455 ------------------- .../vendor/k8s.io/api/core/v1/generated.proto | 4790 -- .../vendor/k8s.io/api/core/v1/objectreference.go | 33 - .../vendor/k8s.io/api/core/v1/register.go | 99 - .../vendor/k8s.io/api/core/v1/resource.go | 56 - .../vendor/k8s.io/api/core/v1/taint.go | 33 - .../vendor/k8s.io/api/core/v1/toleration.go | 56 - .../vendor/k8s.io/api/core/v1/types.go | 5361 -- .../api/core/v1/types_swagger_doc_generated.go | 2346 - .../k8s.io/api/core/v1/zz_generated.deepcopy.go | 5430 -- .../vendor/k8s.io/api/events/v1beta1/doc.go | 22 - .../k8s.io/api/events/v1beta1/generated.pb.go | 1287 - .../k8s.io/api/events/v1beta1/generated.proto | 121 - .../vendor/k8s.io/api/events/v1beta1/register.go | 53 - .../vendor/k8s.io/api/events/v1beta1/types.go | 122 - .../events/v1beta1/types_swagger_doc_generated.go | 73 - .../api/events/v1beta1/zz_generated.deepcopy.go | 117 - .../vendor/k8s.io/api/extensions/v1beta1/doc.go | 20 - .../k8s.io/api/extensions/v1beta1/generated.pb.go | 12298 ----- .../k8s.io/api/extensions/v1beta1/generated.proto | 1160 - .../k8s.io/api/extensions/v1beta1/register.go | 66 - .../vendor/k8s.io/api/extensions/v1beta1/types.go | 1360 - .../v1beta1/types_swagger_doc_generated.go | 640 - .../extensions/v1beta1/zz_generated.deepcopy.go | 1445 - .../vendor/k8s.io/api/networking/v1/doc.go | 21 - .../k8s.io/api/networking/v1/generated.pb.go | 1849 - .../k8s.io/api/networking/v1/generated.proto | 195 - .../vendor/k8s.io/api/networking/v1/register.go | 53 - .../vendor/k8s.io/api/networking/v1/types.go | 203 - .../networking/v1/types_swagger_doc_generated.go | 113 - .../api/networking/v1/zz_generated.deepcopy.go | 262 - .../vendor/k8s.io/api/policy/v1beta1/doc.go | 23 - .../k8s.io/api/policy/v1beta1/generated.pb.go | 4324 -- .../k8s.io/api/policy/v1beta1/generated.proto | 367 - .../vendor/k8s.io/api/policy/v1beta1/register.go | 56 - .../vendor/k8s.io/api/policy/v1beta1/types.go | 454 - .../policy/v1beta1/types_swagger_doc_generated.go | 222 - .../api/policy/v1beta1/zz_generated.deepcopy.go | 488 - cmd/bpa-operator/vendor/k8s.io/api/rbac/v1/doc.go | 22 - .../vendor/k8s.io/api/rbac/v1/generated.pb.go | 2729 - .../vendor/k8s.io/api/rbac/v1/generated.proto | 197 - .../vendor/k8s.io/api/rbac/v1/register.go | 58 - .../vendor/k8s.io/api/rbac/v1/types.go | 235 - .../api/rbac/v1/types_swagger_doc_generated.go | 158 - .../k8s.io/api/rbac/v1/zz_generated.deepcopy.go | 389 - .../vendor/k8s.io/api/rbac/v1alpha1/doc.go | 22 - .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 2730 - .../k8s.io/api/rbac/v1alpha1/generated.proto | 199 - .../vendor/k8s.io/api/rbac/v1alpha1/register.go | 58 - .../vendor/k8s.io/api/rbac/v1alpha1/types.go | 237 - .../rbac/v1alpha1/types_swagger_doc_generated.go | 158 - .../api/rbac/v1alpha1/zz_generated.deepcopy.go | 389 - .../vendor/k8s.io/api/rbac/v1beta1/doc.go | 22 - .../vendor/k8s.io/api/rbac/v1beta1/generated.pb.go | 2729 - .../vendor/k8s.io/api/rbac/v1beta1/generated.proto | 198 - .../vendor/k8s.io/api/rbac/v1beta1/register.go | 58 - .../vendor/k8s.io/api/rbac/v1beta1/types.go | 235 - .../rbac/v1beta1/types_swagger_doc_generated.go | 158 - .../api/rbac/v1beta1/zz_generated.deepcopy.go | 389 - .../vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 22 - .../k8s.io/api/scheduling/v1alpha1/generated.pb.go | 
621 - .../k8s.io/api/scheduling/v1alpha1/generated.proto | 67 - .../k8s.io/api/scheduling/v1alpha1/register.go | 52 - .../vendor/k8s.io/api/scheduling/v1alpha1/types.go | 66 - .../v1alpha1/types_swagger_doc_generated.go | 52 - .../scheduling/v1alpha1/zz_generated.deepcopy.go | 84 - .../vendor/k8s.io/api/scheduling/v1beta1/doc.go | 22 - .../k8s.io/api/scheduling/v1beta1/generated.pb.go | 621 - .../k8s.io/api/scheduling/v1beta1/generated.proto | 67 - .../k8s.io/api/scheduling/v1beta1/register.go | 52 - .../vendor/k8s.io/api/scheduling/v1beta1/types.go | 66 - .../v1beta1/types_swagger_doc_generated.go | 52 - .../scheduling/v1beta1/zz_generated.deepcopy.go | 84 - .../vendor/k8s.io/api/settings/v1alpha1/doc.go | 22 - .../k8s.io/api/settings/v1alpha1/generated.pb.go | 910 - .../k8s.io/api/settings/v1alpha1/generated.proto | 75 - .../k8s.io/api/settings/v1alpha1/register.go | 52 - .../vendor/k8s.io/api/settings/v1alpha1/types.go | 70 - .../v1alpha1/types_swagger_doc_generated.go | 61 - .../api/settings/v1alpha1/zz_generated.deepcopy.go | 131 - .../vendor/k8s.io/api/storage/v1/doc.go | 21 - .../vendor/k8s.io/api/storage/v1/generated.pb.go | 2246 - .../vendor/k8s.io/api/storage/v1/generated.proto | 186 - .../vendor/k8s.io/api/storage/v1/register.go | 56 - .../vendor/k8s.io/api/storage/v1/types.go | 211 - .../api/storage/v1/types_swagger_doc_generated.go | 119 - .../k8s.io/api/storage/v1/zz_generated.deepcopy.go | 268 - .../vendor/k8s.io/api/storage/v1alpha1/doc.go | 21 - .../k8s.io/api/storage/v1alpha1/generated.pb.go | 1503 - .../k8s.io/api/storage/v1alpha1/generated.proto | 126 - .../vendor/k8s.io/api/storage/v1alpha1/register.go | 50 - .../vendor/k8s.io/api/storage/v1alpha1/types.go | 126 - .../v1alpha1/types_swagger_doc_generated.go | 93 - .../api/storage/v1alpha1/zz_generated.deepcopy.go | 174 - .../vendor/k8s.io/api/storage/v1beta1/doc.go | 21 - .../k8s.io/api/storage/v1beta1/generated.pb.go | 2246 - .../k8s.io/api/storage/v1beta1/generated.proto | 186 - .../vendor/k8s.io/api/storage/v1beta1/register.go | 56 - .../vendor/k8s.io/api/storage/v1beta1/types.go | 211 - .../storage/v1beta1/types_swagger_doc_generated.go | 119 - .../api/storage/v1beta1/zz_generated.deepcopy.go | 268 - .../vendor/k8s.io/apiextensions-apiserver/LICENSE | 202 - .../pkg/apis/apiextensions/deepcopy.go | 262 - .../pkg/apis/apiextensions/doc.go | 21 - .../pkg/apis/apiextensions/helpers.go | 149 - .../pkg/apis/apiextensions/register.go | 51 - .../pkg/apis/apiextensions/types.go | 368 - .../pkg/apis/apiextensions/types_jsonschema.go | 96 - .../pkg/apis/apiextensions/v1beta1/conversion.go | 73 - .../pkg/apis/apiextensions/v1beta1/deepcopy.go | 238 - .../pkg/apis/apiextensions/v1beta1/defaults.go | 81 - .../pkg/apis/apiextensions/v1beta1/doc.go | 24 - .../pkg/apis/apiextensions/v1beta1/generated.pb.go | 7429 --- .../pkg/apis/apiextensions/v1beta1/generated.proto | 522 - .../pkg/apis/apiextensions/v1beta1/marshal.go | 135 - .../pkg/apis/apiextensions/v1beta1/register.go | 62 - .../pkg/apis/apiextensions/v1beta1/types.go | 428 - .../apis/apiextensions/v1beta1/types_jsonschema.go | 150 - .../v1beta1/zz_generated.conversion.go | 1269 - .../apiextensions/v1beta1/zz_generated.deepcopy.go | 647 - .../apiextensions/v1beta1/zz_generated.defaults.go | 48 - .../apis/apiextensions/zz_generated.deepcopy.go | 544 - .../vendor/k8s.io/apimachinery/LICENSE | 202 - .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 24 - .../k8s.io/apimachinery/pkg/api/errors/doc.go | 18 - .../k8s.io/apimachinery/pkg/api/errors/errors.go | 605 - 
.../vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS | 25 - .../vendor/k8s.io/apimachinery/pkg/api/meta/doc.go | 19 - .../k8s.io/apimachinery/pkg/api/meta/errors.go | 121 - .../pkg/api/meta/firsthit_restmapper.go | 97 - .../k8s.io/apimachinery/pkg/api/meta/help.go | 205 - .../k8s.io/apimachinery/pkg/api/meta/interfaces.go | 134 - .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 104 - .../k8s.io/apimachinery/pkg/api/meta/meta.go | 650 - .../apimachinery/pkg/api/meta/multirestmapper.go | 210 - .../k8s.io/apimachinery/pkg/api/meta/priority.go | 222 - .../k8s.io/apimachinery/pkg/api/meta/restmapper.go | 518 - .../k8s.io/apimachinery/pkg/api/resource/OWNERS | 16 - .../k8s.io/apimachinery/pkg/api/resource/amount.go | 299 - .../apimachinery/pkg/api/resource/generated.pb.go | 75 - .../apimachinery/pkg/api/resource/generated.proto | 88 - .../k8s.io/apimachinery/pkg/api/resource/math.go | 314 - .../apimachinery/pkg/api/resource/quantity.go | 738 - .../pkg/api/resource/quantity_proto.go | 284 - .../apimachinery/pkg/api/resource/scale_int.go | 95 - .../k8s.io/apimachinery/pkg/api/resource/suffix.go | 198 - .../pkg/api/resource/zz_generated.deepcopy.go | 27 - .../pkg/apis/meta/internalversion/conversion.go | 54 - .../pkg/apis/meta/internalversion/doc.go | 20 - .../pkg/apis/meta/internalversion/register.go | 113 - .../pkg/apis/meta/internalversion/types.go | 70 - .../internalversion/zz_generated.conversion.go | 143 - .../meta/internalversion/zz_generated.deepcopy.go | 96 - .../k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS | 31 - .../pkg/apis/meta/v1/controller_ref.go | 54 - .../apimachinery/pkg/apis/meta/v1/conversion.go | 319 - .../k8s.io/apimachinery/pkg/apis/meta/v1/doc.go | 23 - .../apimachinery/pkg/apis/meta/v1/duration.go | 50 - .../apimachinery/pkg/apis/meta/v1/generated.pb.go | 8311 --- .../apimachinery/pkg/apis/meta/v1/generated.proto | 879 - .../apimachinery/pkg/apis/meta/v1/group_version.go | 148 - .../apimachinery/pkg/apis/meta/v1/helpers.go | 234 - .../k8s.io/apimachinery/pkg/apis/meta/v1/labels.go | 55 - .../k8s.io/apimachinery/pkg/apis/meta/v1/meta.go | 170 - .../apimachinery/pkg/apis/meta/v1/micro_time.go | 183 - .../pkg/apis/meta/v1/micro_time_proto.go | 72 - .../apimachinery/pkg/apis/meta/v1/register.go | 97 - .../k8s.io/apimachinery/pkg/apis/meta/v1/time.go | 185 - .../apimachinery/pkg/apis/meta/v1/time_proto.go | 92 - .../k8s.io/apimachinery/pkg/apis/meta/v1/types.go | 1009 - .../apis/meta/v1/types_swagger_doc_generated.go | 350 - .../pkg/apis/meta/v1/unstructured/helpers.go | 451 - .../pkg/apis/meta/v1/unstructured/unstructured.go | 452 - .../apis/meta/v1/unstructured/unstructured_list.go | 188 - .../meta/v1/unstructured/zz_generated.deepcopy.go | 55 - .../k8s.io/apimachinery/pkg/apis/meta/v1/watch.go | 89 - .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 961 - .../pkg/apis/meta/v1/zz_generated.defaults.go | 32 - .../pkg/apis/meta/v1beta1/conversion.go | 27 - .../apimachinery/pkg/apis/meta/v1beta1/deepcopy.go | 44 - .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 23 - .../pkg/apis/meta/v1beta1/generated.pb.go | 613 - .../pkg/apis/meta/v1beta1/generated.proto | 57 - .../apimachinery/pkg/apis/meta/v1beta1/register.go | 57 - .../apimachinery/pkg/apis/meta/v1beta1/types.go | 161 - .../meta/v1beta1/types_swagger_doc_generated.go | 104 - .../pkg/apis/meta/v1beta1/zz_generated.deepcopy.go | 189 - .../pkg/apis/meta/v1beta1/zz_generated.defaults.go | 32 - .../apimachinery/pkg/conversion/converter.go | 898 - .../apimachinery/pkg/conversion/deep_equal.go | 36 - 
.../k8s.io/apimachinery/pkg/conversion/doc.go | 24 - .../k8s.io/apimachinery/pkg/conversion/helper.go | 39 - .../pkg/conversion/queryparams/convert.go | 198 - .../apimachinery/pkg/conversion/queryparams/doc.go | 19 - .../vendor/k8s.io/apimachinery/pkg/fields/doc.go | 19 - .../k8s.io/apimachinery/pkg/fields/fields.go | 62 - .../k8s.io/apimachinery/pkg/fields/requirements.go | 30 - .../k8s.io/apimachinery/pkg/fields/selector.go | 476 - .../vendor/k8s.io/apimachinery/pkg/labels/doc.go | 19 - .../k8s.io/apimachinery/pkg/labels/labels.go | 181 - .../k8s.io/apimachinery/pkg/labels/selector.go | 891 - .../pkg/labels/zz_generated.deepcopy.go | 42 - .../k8s.io/apimachinery/pkg/runtime/codec.go | 332 - .../k8s.io/apimachinery/pkg/runtime/codec_check.go | 48 - .../k8s.io/apimachinery/pkg/runtime/conversion.go | 113 - .../k8s.io/apimachinery/pkg/runtime/converter.go | 805 - .../vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 51 - .../k8s.io/apimachinery/pkg/runtime/embedded.go | 142 - .../k8s.io/apimachinery/pkg/runtime/error.go | 122 - .../k8s.io/apimachinery/pkg/runtime/extension.go | 51 - .../apimachinery/pkg/runtime/generated.pb.go | 753 - .../apimachinery/pkg/runtime/generated.proto | 127 - .../k8s.io/apimachinery/pkg/runtime/helper.go | 212 - .../k8s.io/apimachinery/pkg/runtime/interfaces.go | 252 - .../k8s.io/apimachinery/pkg/runtime/register.go | 61 - .../pkg/runtime/schema/generated.pb.go | 63 - .../pkg/runtime/schema/generated.proto | 26 - .../pkg/runtime/schema/group_version.go | 300 - .../apimachinery/pkg/runtime/schema/interfaces.go | 40 - .../k8s.io/apimachinery/pkg/runtime/scheme.go | 754 - .../apimachinery/pkg/runtime/scheme_builder.go | 48 - .../pkg/runtime/serializer/codec_factory.go | 237 - .../pkg/runtime/serializer/json/json.go | 303 - .../pkg/runtime/serializer/json/meta.go | 63 - .../pkg/runtime/serializer/negotiated_codec.go | 43 - .../pkg/runtime/serializer/protobuf/doc.go | 18 - .../pkg/runtime/serializer/protobuf/protobuf.go | 459 - .../pkg/runtime/serializer/protobuf_extension.go | 48 - .../runtime/serializer/recognizer/recognizer.go | 127 - .../pkg/runtime/serializer/streaming/streaming.go | 137 - .../runtime/serializer/versioning/versioning.go | 282 - .../pkg/runtime/swagger_doc_generator.go | 262 - .../k8s.io/apimachinery/pkg/runtime/types.go | 137 - .../k8s.io/apimachinery/pkg/runtime/types_proto.go | 69 - .../pkg/runtime/zz_generated.deepcopy.go | 108 - .../k8s.io/apimachinery/pkg/selection/operator.go | 33 - .../vendor/k8s.io/apimachinery/pkg/types/doc.go | 18 - .../apimachinery/pkg/types/namespacedname.go | 43 - .../k8s.io/apimachinery/pkg/types/nodename.go | 43 - .../vendor/k8s.io/apimachinery/pkg/types/patch.go | 28 - .../vendor/k8s.io/apimachinery/pkg/types/uid.go | 22 - .../k8s.io/apimachinery/pkg/util/cache/cache.go | 83 - .../apimachinery/pkg/util/cache/lruexpirecache.go | 102 - .../k8s.io/apimachinery/pkg/util/clock/clock.go | 348 - .../k8s.io/apimachinery/pkg/util/diff/diff.go | 313 - .../k8s.io/apimachinery/pkg/util/errors/doc.go | 18 - .../k8s.io/apimachinery/pkg/util/errors/errors.go | 201 - .../k8s.io/apimachinery/pkg/util/framer/framer.go | 167 - .../apimachinery/pkg/util/intstr/generated.pb.go | 362 - .../apimachinery/pkg/util/intstr/generated.proto | 43 - .../k8s.io/apimachinery/pkg/util/intstr/intstr.go | 184 - .../k8s.io/apimachinery/pkg/util/json/json.go | 119 - .../k8s.io/apimachinery/pkg/util/mergepatch/OWNERS | 5 - .../apimachinery/pkg/util/mergepatch/errors.go | 102 - .../apimachinery/pkg/util/mergepatch/util.go | 133 - 
.../apimachinery/pkg/util/naming/from_stack.go | 93 - .../k8s.io/apimachinery/pkg/util/net/http.go | 442 - .../k8s.io/apimachinery/pkg/util/net/interface.go | 416 - .../k8s.io/apimachinery/pkg/util/net/port_range.go | 149 - .../k8s.io/apimachinery/pkg/util/net/port_split.go | 77 - .../k8s.io/apimachinery/pkg/util/net/util.go | 56 - .../apimachinery/pkg/util/runtime/runtime.go | 173 - .../k8s.io/apimachinery/pkg/util/sets/byte.go | 203 - .../k8s.io/apimachinery/pkg/util/sets/doc.go | 20 - .../k8s.io/apimachinery/pkg/util/sets/empty.go | 23 - .../k8s.io/apimachinery/pkg/util/sets/int.go | 203 - .../k8s.io/apimachinery/pkg/util/sets/int64.go | 203 - .../k8s.io/apimachinery/pkg/util/sets/string.go | 203 - .../apimachinery/pkg/util/strategicpatch/OWNERS | 6 - .../apimachinery/pkg/util/strategicpatch/errors.go | 49 - .../apimachinery/pkg/util/strategicpatch/meta.go | 194 - .../apimachinery/pkg/util/strategicpatch/patch.go | 2174 - .../apimachinery/pkg/util/strategicpatch/types.go | 193 - .../k8s.io/apimachinery/pkg/util/uuid/uuid.go | 43 - .../pkg/util/validation/field/errors.go | 259 - .../apimachinery/pkg/util/validation/field/path.go | 91 - .../apimachinery/pkg/util/validation/validation.go | 407 - .../k8s.io/apimachinery/pkg/util/wait/doc.go | 19 - .../k8s.io/apimachinery/pkg/util/wait/wait.go | 405 - .../k8s.io/apimachinery/pkg/util/yaml/decoder.go | 346 - .../vendor/k8s.io/apimachinery/pkg/version/doc.go | 20 - .../k8s.io/apimachinery/pkg/version/helpers.go | 88 - .../k8s.io/apimachinery/pkg/version/types.go | 37 - .../vendor/k8s.io/apimachinery/pkg/watch/doc.go | 19 - .../vendor/k8s.io/apimachinery/pkg/watch/filter.go | 105 - .../vendor/k8s.io/apimachinery/pkg/watch/mux.go | 260 - .../k8s.io/apimachinery/pkg/watch/streamwatcher.go | 119 - .../vendor/k8s.io/apimachinery/pkg/watch/watch.go | 317 - .../pkg/watch/zz_generated.deepcopy.go | 40 - .../third_party/forked/golang/json/OWNERS | 5 - .../third_party/forked/golang/json/fields.go | 513 - .../forked/golang/reflect/deep_equal.go | 388 - cmd/bpa-operator/vendor/k8s.io/client-go/LICENSE | 202 - .../k8s.io/client-go/discovery/cached_discovery.go | 295 - .../k8s.io/client-go/discovery/discovery_client.go | 472 - .../vendor/k8s.io/client-go/discovery/doc.go | 19 - .../k8s.io/client-go/discovery/fake/discovery.go | 144 - .../vendor/k8s.io/client-go/discovery/helper.go | 125 - .../k8s.io/client-go/discovery/round_tripper.go | 62 - .../vendor/k8s.io/client-go/dynamic/fake/simple.go | 370 - .../vendor/k8s.io/client-go/dynamic/interface.go | 59 - .../vendor/k8s.io/client-go/dynamic/scheme.go | 98 - .../vendor/k8s.io/client-go/dynamic/simple.go | 326 - .../k8s.io/client-go/kubernetes/clientset.go | 668 - .../vendor/k8s.io/client-go/kubernetes/doc.go | 20 - .../kubernetes/fake/clientset_generated.go | 384 - .../vendor/k8s.io/client-go/kubernetes/fake/doc.go | 20 - .../k8s.io/client-go/kubernetes/fake/register.go | 118 - .../vendor/k8s.io/client-go/kubernetes/import.go | 19 - .../k8s.io/client-go/kubernetes/scheme/doc.go | 20 - .../k8s.io/client-go/kubernetes/scheme/register.go | 118 - .../v1alpha1/admissionregistration_client.go | 90 - .../typed/admissionregistration/v1alpha1/doc.go | 20 - .../admissionregistration/v1alpha1/fake/doc.go | 20 - .../fake/fake_admissionregistration_client.go | 40 - .../v1alpha1/fake/fake_initializerconfiguration.go | 120 - .../v1alpha1/generated_expansion.go | 21 - .../v1alpha1/initializerconfiguration.go | 164 - .../v1beta1/admissionregistration_client.go | 95 - .../typed/admissionregistration/v1beta1/doc.go | 20 - 
.../admissionregistration/v1beta1/fake/doc.go | 20 - .../fake/fake_admissionregistration_client.go | 44 - .../fake/fake_mutatingwebhookconfiguration.go | 120 - .../fake/fake_validatingwebhookconfiguration.go | 120 - .../v1beta1/generated_expansion.go | 23 - .../v1beta1/mutatingwebhookconfiguration.go | 164 - .../v1beta1/validatingwebhookconfiguration.go | 164 - .../kubernetes/typed/apps/v1/apps_client.go | 110 - .../kubernetes/typed/apps/v1/controllerrevision.go | 174 - .../kubernetes/typed/apps/v1/daemonset.go | 191 - .../kubernetes/typed/apps/v1/deployment.go | 223 - .../client-go/kubernetes/typed/apps/v1/doc.go | 20 - .../client-go/kubernetes/typed/apps/v1/fake/doc.go | 20 - .../typed/apps/v1/fake/fake_apps_client.go | 56 - .../typed/apps/v1/fake/fake_controllerrevision.go | 128 - .../typed/apps/v1/fake/fake_daemonset.go | 140 - .../typed/apps/v1/fake/fake_deployment.go | 163 - .../typed/apps/v1/fake/fake_replicaset.go | 163 - .../typed/apps/v1/fake/fake_statefulset.go | 163 - .../typed/apps/v1/generated_expansion.go | 29 - .../kubernetes/typed/apps/v1/replicaset.go | 223 - .../kubernetes/typed/apps/v1/statefulset.go | 223 - .../kubernetes/typed/apps/v1beta1/apps_client.go | 100 - .../typed/apps/v1beta1/controllerrevision.go | 174 - .../kubernetes/typed/apps/v1beta1/deployment.go | 191 - .../client-go/kubernetes/typed/apps/v1beta1/doc.go | 20 - .../kubernetes/typed/apps/v1beta1/fake/doc.go | 20 - .../typed/apps/v1beta1/fake/fake_apps_client.go | 48 - .../apps/v1beta1/fake/fake_controllerrevision.go | 128 - .../typed/apps/v1beta1/fake/fake_deployment.go | 140 - .../typed/apps/v1beta1/fake/fake_statefulset.go | 140 - .../typed/apps/v1beta1/generated_expansion.go | 25 - .../kubernetes/typed/apps/v1beta1/statefulset.go | 191 - .../kubernetes/typed/apps/v1beta2/apps_client.go | 110 - .../typed/apps/v1beta2/controllerrevision.go | 174 - .../kubernetes/typed/apps/v1beta2/daemonset.go | 191 - .../kubernetes/typed/apps/v1beta2/deployment.go | 191 - .../client-go/kubernetes/typed/apps/v1beta2/doc.go | 20 - .../kubernetes/typed/apps/v1beta2/fake/doc.go | 20 - .../typed/apps/v1beta2/fake/fake_apps_client.go | 56 - .../apps/v1beta2/fake/fake_controllerrevision.go | 128 - .../typed/apps/v1beta2/fake/fake_daemonset.go | 140 - .../typed/apps/v1beta2/fake/fake_deployment.go | 140 - .../typed/apps/v1beta2/fake/fake_replicaset.go | 140 - .../typed/apps/v1beta2/fake/fake_statefulset.go | 162 - .../typed/apps/v1beta2/generated_expansion.go | 29 - .../kubernetes/typed/apps/v1beta2/replicaset.go | 191 - .../kubernetes/typed/apps/v1beta2/statefulset.go | 222 - .../v1alpha1/auditregistration_client.go | 90 - .../typed/auditregistration/v1alpha1/auditsink.go | 164 - .../typed/auditregistration/v1alpha1/doc.go | 20 - .../typed/auditregistration/v1alpha1/fake/doc.go | 20 - .../v1alpha1/fake/fake_auditregistration_client.go | 40 - .../v1alpha1/fake/fake_auditsink.go | 120 - .../v1alpha1/generated_expansion.go | 21 - .../authentication/v1/authentication_client.go | 90 - .../kubernetes/typed/authentication/v1/doc.go | 20 - .../kubernetes/typed/authentication/v1/fake/doc.go | 20 - .../v1/fake/fake_authentication_client.go | 40 - .../authentication/v1/fake/fake_tokenreview.go | 24 - .../v1/fake/fake_tokenreview_expansion.go | 27 - .../typed/authentication/v1/generated_expansion.go | 19 - .../typed/authentication/v1/tokenreview.go | 46 - .../authentication/v1/tokenreview_expansion.go | 35 - .../v1beta1/authentication_client.go | 90 - .../kubernetes/typed/authentication/v1beta1/doc.go | 20 - 
.../typed/authentication/v1beta1/fake/doc.go | 20 - .../v1beta1/fake/fake_authentication_client.go | 40 - .../v1beta1/fake/fake_tokenreview.go | 24 - .../v1beta1/fake/fake_tokenreview_expansion.go | 27 - .../authentication/v1beta1/generated_expansion.go | 19 - .../typed/authentication/v1beta1/tokenreview.go | 46 - .../v1beta1/tokenreview_expansion.go | 35 - .../typed/authorization/v1/authorization_client.go | 105 - .../kubernetes/typed/authorization/v1/doc.go | 20 - .../kubernetes/typed/authorization/v1/fake/doc.go | 20 - .../v1/fake/fake_authorization_client.go | 52 - .../v1/fake/fake_localsubjectaccessreview.go | 25 - .../fake_localsubjectaccessreview_expansion.go | 27 - .../v1/fake/fake_selfsubjectaccessreview.go | 24 - .../fake/fake_selfsubjectaccessreview_expansion.go | 27 - .../v1/fake/fake_selfsubjectrulesreview.go | 24 - .../fake/fake_selfsubjectrulesreview_expansion.go | 27 - .../v1/fake/fake_subjectaccessreview.go | 24 - .../v1/fake/fake_subjectaccessreview_expansion.go | 30 - .../typed/authorization/v1/generated_expansion.go | 19 - .../authorization/v1/localsubjectaccessreview.go | 48 - .../v1/localsubjectaccessreview_expansion.go | 36 - .../authorization/v1/selfsubjectaccessreview.go | 46 - .../v1/selfsubjectaccessreview_expansion.go | 35 - .../authorization/v1/selfsubjectrulesreview.go | 46 - .../v1/selfsubjectrulesreview_expansion.go | 35 - .../typed/authorization/v1/subjectaccessreview.go | 46 - .../v1/subjectaccessreview_expansion.go | 36 - .../authorization/v1beta1/authorization_client.go | 105 - .../kubernetes/typed/authorization/v1beta1/doc.go | 20 - .../typed/authorization/v1beta1/fake/doc.go | 20 - .../v1beta1/fake/fake_authorization_client.go | 52 - .../v1beta1/fake/fake_generated_expansion.go | 17 - .../v1beta1/fake/fake_localsubjectaccessreview.go | 25 - .../fake_localsubjectaccessreview_expansion.go | 27 - .../v1beta1/fake/fake_selfsubjectaccessreview.go | 24 - .../fake/fake_selfsubjectaccessreview_expansion.go | 27 - .../v1beta1/fake/fake_selfsubjectrulesreview.go | 24 - .../fake/fake_selfsubjectrulesreview_expansion.go | 27 - .../v1beta1/fake/fake_subjectaccessreview.go | 24 - .../fake/fake_subjectaccessreview_expansion.go | 27 - .../authorization/v1beta1/generated_expansion.go | 19 - .../v1beta1/localsubjectaccessreview.go | 48 - .../v1beta1/localsubjectaccessreview_expansion.go | 36 - .../v1beta1/selfsubjectaccessreview.go | 46 - .../v1beta1/selfsubjectaccessreview_expansion.go | 35 - .../v1beta1/selfsubjectrulesreview.go | 46 - .../v1beta1/selfsubjectrulesreview_expansion.go | 35 - .../authorization/v1beta1/subjectaccessreview.go | 46 - .../v1beta1/subjectaccessreview_expansion.go | 36 - .../typed/autoscaling/v1/autoscaling_client.go | 90 - .../kubernetes/typed/autoscaling/v1/doc.go | 20 - .../kubernetes/typed/autoscaling/v1/fake/doc.go | 20 - .../autoscaling/v1/fake/fake_autoscaling_client.go | 40 - .../v1/fake/fake_horizontalpodautoscaler.go | 140 - .../typed/autoscaling/v1/generated_expansion.go | 21 - .../autoscaling/v1/horizontalpodautoscaler.go | 191 - .../autoscaling/v2beta1/autoscaling_client.go | 90 - .../kubernetes/typed/autoscaling/v2beta1/doc.go | 20 - .../typed/autoscaling/v2beta1/fake/doc.go | 20 - .../v2beta1/fake/fake_autoscaling_client.go | 40 - .../v2beta1/fake/fake_horizontalpodautoscaler.go | 140 - .../autoscaling/v2beta1/generated_expansion.go | 21 - .../autoscaling/v2beta1/horizontalpodautoscaler.go | 191 - .../autoscaling/v2beta2/autoscaling_client.go | 90 - .../kubernetes/typed/autoscaling/v2beta2/doc.go | 20 - 
.../typed/autoscaling/v2beta2/fake/doc.go | 20 - .../v2beta2/fake/fake_autoscaling_client.go | 40 - .../v2beta2/fake/fake_horizontalpodautoscaler.go | 140 - .../autoscaling/v2beta2/generated_expansion.go | 21 - .../autoscaling/v2beta2/horizontalpodautoscaler.go | 191 - .../kubernetes/typed/batch/v1/batch_client.go | 90 - .../client-go/kubernetes/typed/batch/v1/doc.go | 20 - .../kubernetes/typed/batch/v1/fake/doc.go | 20 - .../typed/batch/v1/fake/fake_batch_client.go | 40 - .../kubernetes/typed/batch/v1/fake/fake_job.go | 140 - .../typed/batch/v1/generated_expansion.go | 21 - .../client-go/kubernetes/typed/batch/v1/job.go | 191 - .../kubernetes/typed/batch/v1beta1/batch_client.go | 90 - .../kubernetes/typed/batch/v1beta1/cronjob.go | 191 - .../kubernetes/typed/batch/v1beta1/doc.go | 20 - .../kubernetes/typed/batch/v1beta1/fake/doc.go | 20 - .../typed/batch/v1beta1/fake/fake_batch_client.go | 40 - .../typed/batch/v1beta1/fake/fake_cronjob.go | 140 - .../typed/batch/v1beta1/generated_expansion.go | 21 - .../typed/batch/v2alpha1/batch_client.go | 90 - .../kubernetes/typed/batch/v2alpha1/cronjob.go | 191 - .../kubernetes/typed/batch/v2alpha1/doc.go | 20 - .../kubernetes/typed/batch/v2alpha1/fake/doc.go | 20 - .../typed/batch/v2alpha1/fake/fake_batch_client.go | 40 - .../typed/batch/v2alpha1/fake/fake_cronjob.go | 140 - .../typed/batch/v2alpha1/generated_expansion.go | 21 - .../certificates/v1beta1/certificates_client.go | 90 - .../v1beta1/certificatesigningrequest.go | 180 - .../v1beta1/certificatesigningrequest_expansion.go | 37 - .../kubernetes/typed/certificates/v1beta1/doc.go | 20 - .../typed/certificates/v1beta1/fake/doc.go | 20 - .../v1beta1/fake/fake_certificates_client.go | 40 - .../v1beta1/fake/fake_certificatesigningrequest.go | 131 - .../fake_certificatesigningrequest_expansion.go | 31 - .../certificates/v1beta1/generated_expansion.go | 19 - .../coordination/v1beta1/coordination_client.go | 90 - .../kubernetes/typed/coordination/v1beta1/doc.go | 20 - .../typed/coordination/v1beta1/fake/doc.go | 20 - .../v1beta1/fake/fake_coordination_client.go | 40 - .../typed/coordination/v1beta1/fake/fake_lease.go | 128 - .../coordination/v1beta1/generated_expansion.go | 21 - .../kubernetes/typed/coordination/v1beta1/lease.go | 174 - .../kubernetes/typed/core/v1/componentstatus.go | 164 - .../kubernetes/typed/core/v1/configmap.go | 174 - .../kubernetes/typed/core/v1/core_client.go | 165 - .../client-go/kubernetes/typed/core/v1/doc.go | 20 - .../kubernetes/typed/core/v1/endpoints.go | 174 - .../client-go/kubernetes/typed/core/v1/event.go | 174 - .../kubernetes/typed/core/v1/event_expansion.go | 164 - .../client-go/kubernetes/typed/core/v1/fake/doc.go | 20 - .../typed/core/v1/fake/fake_componentstatus.go | 120 - .../typed/core/v1/fake/fake_configmap.go | 128 - .../typed/core/v1/fake/fake_core_client.go | 100 - .../typed/core/v1/fake/fake_endpoints.go | 128 - .../kubernetes/typed/core/v1/fake/fake_event.go | 128 - .../typed/core/v1/fake/fake_event_expansion.go | 93 - .../typed/core/v1/fake/fake_limitrange.go | 128 - .../typed/core/v1/fake/fake_namespace.go | 123 - .../typed/core/v1/fake/fake_namespace_expansion.go | 37 - .../kubernetes/typed/core/v1/fake/fake_node.go | 131 - .../typed/core/v1/fake/fake_node_expansion.go | 36 - .../typed/core/v1/fake/fake_persistentvolume.go | 131 - .../core/v1/fake/fake_persistentvolumeclaim.go | 140 - .../kubernetes/typed/core/v1/fake/fake_pod.go | 140 - .../typed/core/v1/fake/fake_pod_expansion.go | 69 - .../typed/core/v1/fake/fake_podtemplate.go | 128 - 
.../core/v1/fake/fake_replicationcontroller.go | 163 - .../typed/core/v1/fake/fake_resourcequota.go | 140 - .../kubernetes/typed/core/v1/fake/fake_secret.go | 128 - .../kubernetes/typed/core/v1/fake/fake_service.go | 132 - .../typed/core/v1/fake/fake_service_expansion.go | 26 - .../typed/core/v1/fake/fake_serviceaccount.go | 128 - .../core/v1/fake/fake_serviceaccount_expansion.go | 31 - .../typed/core/v1/generated_expansion.go | 39 - .../kubernetes/typed/core/v1/limitrange.go | 174 - .../kubernetes/typed/core/v1/namespace.go | 164 - .../typed/core/v1/namespace_expansion.go | 31 - .../client-go/kubernetes/typed/core/v1/node.go | 180 - .../kubernetes/typed/core/v1/node_expansion.go | 43 - .../kubernetes/typed/core/v1/persistentvolume.go | 180 - .../typed/core/v1/persistentvolumeclaim.go | 191 - .../client-go/kubernetes/typed/core/v1/pod.go | 191 - .../kubernetes/typed/core/v1/pod_expansion.go | 45 - .../kubernetes/typed/core/v1/podtemplate.go | 174 - .../typed/core/v1/replicationcontroller.go | 223 - .../kubernetes/typed/core/v1/resourcequota.go | 191 - .../client-go/kubernetes/typed/core/v1/secret.go | 174 - .../client-go/kubernetes/typed/core/v1/service.go | 174 - .../kubernetes/typed/core/v1/service_expansion.go | 41 - .../kubernetes/typed/core/v1/serviceaccount.go | 174 - .../typed/core/v1/serviceaccount_expansion.go | 41 - .../kubernetes/typed/events/v1beta1/doc.go | 20 - .../kubernetes/typed/events/v1beta1/event.go | 174 - .../typed/events/v1beta1/events_client.go | 90 - .../kubernetes/typed/events/v1beta1/fake/doc.go | 20 - .../typed/events/v1beta1/fake/fake_event.go | 128 - .../events/v1beta1/fake/fake_events_client.go | 40 - .../typed/events/v1beta1/generated_expansion.go | 21 - .../typed/extensions/v1beta1/daemonset.go | 191 - .../typed/extensions/v1beta1/deployment.go | 222 - .../extensions/v1beta1/deployment_expansion.go | 29 - .../kubernetes/typed/extensions/v1beta1/doc.go | 20 - .../typed/extensions/v1beta1/extensions_client.go | 110 - .../typed/extensions/v1beta1/fake/doc.go | 20 - .../extensions/v1beta1/fake/fake_daemonset.go | 140 - .../extensions/v1beta1/fake/fake_deployment.go | 162 - .../v1beta1/fake/fake_deployment_expansion.go | 33 - .../v1beta1/fake/fake_extensions_client.go | 56 - .../typed/extensions/v1beta1/fake/fake_ingress.go | 140 - .../v1beta1/fake/fake_podsecuritypolicy.go | 120 - .../extensions/v1beta1/fake/fake_replicaset.go | 162 - .../extensions/v1beta1/generated_expansion.go | 27 - .../kubernetes/typed/extensions/v1beta1/ingress.go | 191 - .../typed/extensions/v1beta1/podsecuritypolicy.go | 164 - .../typed/extensions/v1beta1/replicaset.go | 222 - .../kubernetes/typed/networking/v1/doc.go | 20 - .../kubernetes/typed/networking/v1/fake/doc.go | 20 - .../networking/v1/fake/fake_networking_client.go | 40 - .../typed/networking/v1/fake/fake_networkpolicy.go | 128 - .../typed/networking/v1/generated_expansion.go | 21 - .../typed/networking/v1/networking_client.go | 90 - .../typed/networking/v1/networkpolicy.go | 174 - .../kubernetes/typed/policy/v1beta1/doc.go | 20 - .../kubernetes/typed/policy/v1beta1/eviction.go | 48 - .../typed/policy/v1beta1/eviction_expansion.go | 38 - .../kubernetes/typed/policy/v1beta1/fake/doc.go | 20 - .../typed/policy/v1beta1/fake/fake_eviction.go | 25 - .../policy/v1beta1/fake/fake_eviction_expansion.go | 34 - .../v1beta1/fake/fake_poddisruptionbudget.go | 140 - .../policy/v1beta1/fake/fake_podsecuritypolicy.go | 120 - .../policy/v1beta1/fake/fake_policy_client.go | 48 - .../typed/policy/v1beta1/generated_expansion.go | 23 - 
.../typed/policy/v1beta1/poddisruptionbudget.go | 191 - .../typed/policy/v1beta1/podsecuritypolicy.go | 164 - .../typed/policy/v1beta1/policy_client.go | 100 - .../kubernetes/typed/rbac/v1/clusterrole.go | 164 - .../kubernetes/typed/rbac/v1/clusterrolebinding.go | 164 - .../client-go/kubernetes/typed/rbac/v1/doc.go | 20 - .../client-go/kubernetes/typed/rbac/v1/fake/doc.go | 20 - .../typed/rbac/v1/fake/fake_clusterrole.go | 120 - .../typed/rbac/v1/fake/fake_clusterrolebinding.go | 120 - .../typed/rbac/v1/fake/fake_rbac_client.go | 52 - .../kubernetes/typed/rbac/v1/fake/fake_role.go | 128 - .../typed/rbac/v1/fake/fake_rolebinding.go | 128 - .../typed/rbac/v1/generated_expansion.go | 27 - .../kubernetes/typed/rbac/v1/rbac_client.go | 105 - .../client-go/kubernetes/typed/rbac/v1/role.go | 174 - .../kubernetes/typed/rbac/v1/rolebinding.go | 174 - .../kubernetes/typed/rbac/v1alpha1/clusterrole.go | 164 - .../typed/rbac/v1alpha1/clusterrolebinding.go | 164 - .../kubernetes/typed/rbac/v1alpha1/doc.go | 20 - .../kubernetes/typed/rbac/v1alpha1/fake/doc.go | 20 - .../typed/rbac/v1alpha1/fake/fake_clusterrole.go | 120 - .../rbac/v1alpha1/fake/fake_clusterrolebinding.go | 120 - .../typed/rbac/v1alpha1/fake/fake_rbac_client.go | 52 - .../typed/rbac/v1alpha1/fake/fake_role.go | 128 - .../typed/rbac/v1alpha1/fake/fake_rolebinding.go | 128 - .../typed/rbac/v1alpha1/generated_expansion.go | 27 - .../kubernetes/typed/rbac/v1alpha1/rbac_client.go | 105 - .../kubernetes/typed/rbac/v1alpha1/role.go | 174 - .../kubernetes/typed/rbac/v1alpha1/rolebinding.go | 174 - .../kubernetes/typed/rbac/v1beta1/clusterrole.go | 164 - .../typed/rbac/v1beta1/clusterrolebinding.go | 164 - .../client-go/kubernetes/typed/rbac/v1beta1/doc.go | 20 - .../kubernetes/typed/rbac/v1beta1/fake/doc.go | 20 - .../typed/rbac/v1beta1/fake/fake_clusterrole.go | 120 - .../rbac/v1beta1/fake/fake_clusterrolebinding.go | 120 - .../typed/rbac/v1beta1/fake/fake_rbac_client.go | 52 - .../typed/rbac/v1beta1/fake/fake_role.go | 128 - .../typed/rbac/v1beta1/fake/fake_rolebinding.go | 128 - .../typed/rbac/v1beta1/generated_expansion.go | 27 - .../kubernetes/typed/rbac/v1beta1/rbac_client.go | 105 - .../kubernetes/typed/rbac/v1beta1/role.go | 174 - .../kubernetes/typed/rbac/v1beta1/rolebinding.go | 174 - .../kubernetes/typed/scheduling/v1alpha1/doc.go | 20 - .../typed/scheduling/v1alpha1/fake/doc.go | 20 - .../scheduling/v1alpha1/fake/fake_priorityclass.go | 120 - .../v1alpha1/fake/fake_scheduling_client.go | 40 - .../scheduling/v1alpha1/generated_expansion.go | 21 - .../typed/scheduling/v1alpha1/priorityclass.go | 164 - .../typed/scheduling/v1alpha1/scheduling_client.go | 90 - .../kubernetes/typed/scheduling/v1beta1/doc.go | 20 - .../typed/scheduling/v1beta1/fake/doc.go | 20 - .../scheduling/v1beta1/fake/fake_priorityclass.go | 120 - .../v1beta1/fake/fake_scheduling_client.go | 40 - .../scheduling/v1beta1/generated_expansion.go | 21 - .../typed/scheduling/v1beta1/priorityclass.go | 164 - .../typed/scheduling/v1beta1/scheduling_client.go | 90 - .../kubernetes/typed/settings/v1alpha1/doc.go | 20 - .../kubernetes/typed/settings/v1alpha1/fake/doc.go | 20 - .../typed/settings/v1alpha1/fake/fake_podpreset.go | 128 - .../settings/v1alpha1/fake/fake_settings_client.go | 40 - .../typed/settings/v1alpha1/generated_expansion.go | 21 - .../typed/settings/v1alpha1/podpreset.go | 174 - .../typed/settings/v1alpha1/settings_client.go | 90 - .../client-go/kubernetes/typed/storage/v1/doc.go | 20 - .../kubernetes/typed/storage/v1/fake/doc.go | 20 - 
.../typed/storage/v1/fake/fake_storage_client.go | 44 - .../typed/storage/v1/fake/fake_storageclass.go | 120 - .../typed/storage/v1/fake/fake_volumeattachment.go | 131 - .../typed/storage/v1/generated_expansion.go | 23 - .../kubernetes/typed/storage/v1/storage_client.go | 95 - .../kubernetes/typed/storage/v1/storageclass.go | 164 - .../typed/storage/v1/volumeattachment.go | 180 - .../kubernetes/typed/storage/v1alpha1/doc.go | 20 - .../kubernetes/typed/storage/v1alpha1/fake/doc.go | 20 - .../storage/v1alpha1/fake/fake_storage_client.go | 40 - .../storage/v1alpha1/fake/fake_volumeattachment.go | 131 - .../typed/storage/v1alpha1/generated_expansion.go | 21 - .../typed/storage/v1alpha1/storage_client.go | 90 - .../typed/storage/v1alpha1/volumeattachment.go | 180 - .../kubernetes/typed/storage/v1beta1/doc.go | 20 - .../kubernetes/typed/storage/v1beta1/fake/doc.go | 20 - .../storage/v1beta1/fake/fake_storage_client.go | 44 - .../storage/v1beta1/fake/fake_storageclass.go | 120 - .../storage/v1beta1/fake/fake_volumeattachment.go | 131 - .../typed/storage/v1beta1/generated_expansion.go | 23 - .../typed/storage/v1beta1/storage_client.go | 95 - .../typed/storage/v1beta1/storageclass.go | 164 - .../typed/storage/v1beta1/volumeattachment.go | 180 - .../client-go/pkg/apis/clientauthentication/OWNERS | 7 - .../client-go/pkg/apis/clientauthentication/doc.go | 20 - .../pkg/apis/clientauthentication/register.go | 50 - .../pkg/apis/clientauthentication/types.go | 77 - .../pkg/apis/clientauthentication/v1alpha1/doc.go | 24 - .../apis/clientauthentication/v1alpha1/register.go | 55 - .../apis/clientauthentication/v1alpha1/types.go | 78 - .../v1alpha1/zz_generated.conversion.go | 176 - .../v1alpha1/zz_generated.deepcopy.go | 128 - .../v1alpha1/zz_generated.defaults.go | 32 - .../clientauthentication/v1beta1/conversion.go | 26 - .../pkg/apis/clientauthentication/v1beta1/doc.go | 24 - .../apis/clientauthentication/v1beta1/register.go | 55 - .../pkg/apis/clientauthentication/v1beta1/types.go | 59 - .../v1beta1/zz_generated.conversion.go | 142 - .../v1beta1/zz_generated.deepcopy.go | 92 - .../v1beta1/zz_generated.defaults.go | 32 - .../clientauthentication/zz_generated.deepcopy.go | 128 - .../k8s.io/client-go/pkg/version/.gitattributes | 1 - .../vendor/k8s.io/client-go/pkg/version/base.go | 63 - .../vendor/k8s.io/client-go/pkg/version/def.bzl | 38 - .../vendor/k8s.io/client-go/pkg/version/doc.go | 21 - .../vendor/k8s.io/client-go/pkg/version/version.go | 42 - .../k8s.io/client-go/plugin/pkg/client/auth/OWNERS | 7 - .../plugin/pkg/client/auth/azure/README.md | 50 - .../plugin/pkg/client/auth/azure/azure.go | 360 - .../client-go/plugin/pkg/client/auth/exec/exec.go | 361 - .../client-go/plugin/pkg/client/auth/gcp/OWNERS | 6 - .../client-go/plugin/pkg/client/auth/gcp/gcp.go | 383 - .../client-go/plugin/pkg/client/auth/oidc/OWNERS | 5 - .../client-go/plugin/pkg/client/auth/oidc/oidc.go | 379 - .../plugin/pkg/client/auth/openstack/openstack.go | 193 - .../client-go/plugin/pkg/client/auth/plugins.go | 25 - .../vendor/k8s.io/client-go/rest/OWNERS | 24 - .../vendor/k8s.io/client-go/rest/client.go | 258 - .../vendor/k8s.io/client-go/rest/config.go | 472 - .../vendor/k8s.io/client-go/rest/plugin.go | 73 - .../vendor/k8s.io/client-go/rest/request.go | 1201 - .../vendor/k8s.io/client-go/rest/transport.go | 116 - .../vendor/k8s.io/client-go/rest/url_utils.go | 97 - .../vendor/k8s.io/client-go/rest/urlbackoff.go | 107 - .../vendor/k8s.io/client-go/rest/watch/decoder.go | 72 - .../vendor/k8s.io/client-go/rest/watch/encoder.go | 56 
- .../k8s.io/client-go/rest/zz_generated.deepcopy.go | 52 - .../client-go/restmapper/category_expansion.go | 119 - .../k8s.io/client-go/restmapper/discovery.go | 339 - .../vendor/k8s.io/client-go/restmapper/shortcut.go | 172 - .../vendor/k8s.io/client-go/testing/actions.go | 671 - .../vendor/k8s.io/client-go/testing/fake.go | 213 - .../vendor/k8s.io/client-go/testing/fixture.go | 547 - .../third_party/forked/golang/template/exec.go | 94 - .../third_party/forked/golang/template/funcs.go | 599 - .../vendor/k8s.io/client-go/tools/auth/OWNERS | 7 - .../k8s.io/client-go/tools/auth/clientauth.go | 125 - .../vendor/k8s.io/client-go/tools/cache/OWNERS | 50 - .../k8s.io/client-go/tools/cache/controller.go | 394 - .../k8s.io/client-go/tools/cache/delta_fifo.go | 651 - .../vendor/k8s.io/client-go/tools/cache/doc.go | 24 - .../client-go/tools/cache/expiration_cache.go | 208 - .../tools/cache/expiration_cache_fakes.go | 54 - .../client-go/tools/cache/fake_custom_store.go | 102 - .../vendor/k8s.io/client-go/tools/cache/fifo.go | 358 - .../vendor/k8s.io/client-go/tools/cache/heap.go | 323 - .../vendor/k8s.io/client-go/tools/cache/index.go | 87 - .../vendor/k8s.io/client-go/tools/cache/listers.go | 160 - .../k8s.io/client-go/tools/cache/listwatch.go | 104 - .../k8s.io/client-go/tools/cache/mutation_cache.go | 261 - .../client-go/tools/cache/mutation_detector.go | 130 - .../k8s.io/client-go/tools/cache/reflector.go | 378 - .../client-go/tools/cache/reflector_metrics.go | 119 - .../client-go/tools/cache/shared_informer.go | 597 - .../vendor/k8s.io/client-go/tools/cache/store.go | 244 - .../client-go/tools/cache/thread_safe_store.go | 304 - .../k8s.io/client-go/tools/cache/undelta_store.go | 83 - .../k8s.io/client-go/tools/clientcmd/api/doc.go | 19 - .../client-go/tools/clientcmd/api/helpers.go | 188 - .../client-go/tools/clientcmd/api/latest/latest.go | 61 - .../client-go/tools/clientcmd/api/register.go | 46 - .../k8s.io/client-go/tools/clientcmd/api/types.go | 218 - .../client-go/tools/clientcmd/api/v1/conversion.go | 244 - .../k8s.io/client-go/tools/clientcmd/api/v1/doc.go | 19 - .../client-go/tools/clientcmd/api/v1/register.go | 56 - .../client-go/tools/clientcmd/api/v1/types.go | 203 - .../clientcmd/api/v1/zz_generated.deepcopy.go | 348 - .../tools/clientcmd/api/zz_generated.deepcopy.go | 324 - .../client-go/tools/clientcmd/auth_loaders.go | 111 - .../client-go/tools/clientcmd/client_config.go | 569 - .../k8s.io/client-go/tools/clientcmd/config.go | 490 - .../vendor/k8s.io/client-go/tools/clientcmd/doc.go | 37 - .../k8s.io/client-go/tools/clientcmd/flag.go | 49 - .../k8s.io/client-go/tools/clientcmd/helpers.go | 35 - .../k8s.io/client-go/tools/clientcmd/loader.go | 633 - .../tools/clientcmd/merged_client_builder.go | 168 - .../k8s.io/client-go/tools/clientcmd/overrides.go | 247 - .../k8s.io/client-go/tools/clientcmd/validation.go | 298 - .../k8s.io/client-go/tools/leaderelection/OWNERS | 13 - .../tools/leaderelection/healthzadaptor.go | 69 - .../tools/leaderelection/leaderelection.go | 336 - .../leaderelection/resourcelock/configmaplock.go | 109 - .../leaderelection/resourcelock/endpointslock.go | 104 - .../tools/leaderelection/resourcelock/interface.go | 102 - .../vendor/k8s.io/client-go/tools/metrics/OWNERS | 7 - .../k8s.io/client-go/tools/metrics/metrics.go | 61 - .../vendor/k8s.io/client-go/tools/pager/pager.go | 117 - .../vendor/k8s.io/client-go/tools/record/OWNERS | 27 - .../vendor/k8s.io/client-go/tools/record/doc.go | 18 - .../vendor/k8s.io/client-go/tools/record/event.go | 322 - 
.../k8s.io/client-go/tools/record/events_cache.go | 462 - .../vendor/k8s.io/client-go/tools/record/fake.go | 58 - .../vendor/k8s.io/client-go/tools/reference/ref.go | 126 - .../vendor/k8s.io/client-go/transport/OWNERS | 7 - .../vendor/k8s.io/client-go/transport/cache.go | 117 - .../vendor/k8s.io/client-go/transport/config.go | 115 - .../k8s.io/client-go/transport/round_trippers.go | 564 - .../k8s.io/client-go/transport/token_source.go | 140 - .../vendor/k8s.io/client-go/transport/transport.go | 169 - .../k8s.io/client-go/util/buffer/ring_growing.go | 72 - .../vendor/k8s.io/client-go/util/cert/OWNERS | 7 - .../vendor/k8s.io/client-go/util/cert/cert.go | 269 - .../vendor/k8s.io/client-go/util/cert/csr.go | 75 - .../vendor/k8s.io/client-go/util/cert/io.go | 193 - .../vendor/k8s.io/client-go/util/cert/pem.go | 269 - .../client-go/util/connrotation/connrotation.go | 105 - .../k8s.io/client-go/util/flowcontrol/backoff.go | 149 - .../k8s.io/client-go/util/flowcontrol/throttle.go | 143 - .../k8s.io/client-go/util/homedir/homedir.go | 47 - .../k8s.io/client-go/util/integer/integer.go | 67 - .../vendor/k8s.io/client-go/util/jsonpath/doc.go | 20 - .../k8s.io/client-go/util/jsonpath/jsonpath.go | 517 - .../vendor/k8s.io/client-go/util/jsonpath/node.go | 255 - .../k8s.io/client-go/util/jsonpath/parser.go | 525 - .../vendor/k8s.io/client-go/util/retry/OWNERS | 2 - .../vendor/k8s.io/client-go/util/retry/util.go | 79 - .../util/workqueue/default_rate_limiters.go | 211 - .../client-go/util/workqueue/delaying_queue.go | 255 - .../vendor/k8s.io/client-go/util/workqueue/doc.go | 26 - .../k8s.io/client-go/util/workqueue/metrics.go | 258 - .../client-go/util/workqueue/parallelizer.go | 71 - .../k8s.io/client-go/util/workqueue/queue.go | 212 - .../util/workqueue/rate_limitting_queue.go | 69 - cmd/bpa-operator/vendor/k8s.io/gengo/LICENSE | 202 - cmd/bpa-operator/vendor/k8s.io/gengo/args/args.go | 199 - .../k8s.io/gengo/generator/default_generator.go | 62 - .../k8s.io/gengo/generator/default_package.go | 72 - .../vendor/k8s.io/gengo/generator/doc.go | 31 - .../vendor/k8s.io/gengo/generator/error_tracker.go | 50 - .../vendor/k8s.io/gengo/generator/execute.go | 312 - .../vendor/k8s.io/gengo/generator/generator.go | 219 - .../k8s.io/gengo/generator/import_tracker.go | 64 - .../k8s.io/gengo/generator/snippet_writer.go | 154 - cmd/bpa-operator/vendor/k8s.io/gengo/namer/doc.go | 31 - .../vendor/k8s.io/gengo/namer/import_tracker.go | 112 - .../vendor/k8s.io/gengo/namer/namer.go | 383 - .../vendor/k8s.io/gengo/namer/order.go | 69 - .../vendor/k8s.io/gengo/namer/plural_namer.go | 120 - cmd/bpa-operator/vendor/k8s.io/gengo/parser/doc.go | 19 - .../vendor/k8s.io/gengo/parser/parse.go | 813 - .../vendor/k8s.io/gengo/types/comments.go | 82 - cmd/bpa-operator/vendor/k8s.io/gengo/types/doc.go | 19 - .../vendor/k8s.io/gengo/types/flatten.go | 57 - .../vendor/k8s.io/gengo/types/types.go | 499 - cmd/bpa-operator/vendor/k8s.io/klog/.travis.yml | 15 - .../vendor/k8s.io/klog/CONTRIBUTING.md | 22 - cmd/bpa-operator/vendor/k8s.io/klog/LICENSE | 191 - cmd/bpa-operator/vendor/k8s.io/klog/OWNERS | 19 - cmd/bpa-operator/vendor/k8s.io/klog/README.md | 97 - cmd/bpa-operator/vendor/k8s.io/klog/RELEASE.md | 9 - .../vendor/k8s.io/klog/SECURITY_CONTACTS | 20 - .../vendor/k8s.io/klog/code-of-conduct.md | 3 - cmd/bpa-operator/vendor/k8s.io/klog/klog.go | 1288 - cmd/bpa-operator/vendor/k8s.io/klog/klog_file.go | 139 - .../vendor/k8s.io/kube-openapi/LICENSE | 202 - .../k8s.io/kube-openapi/pkg/common/common.go | 174 - 
.../vendor/k8s.io/kube-openapi/pkg/common/doc.go | 19 - .../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 - .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 - .../k8s.io/kube-openapi/pkg/util/proto/document.go | 318 - .../k8s.io/kube-openapi/pkg/util/proto/openapi.go | 278 - .../vendor/k8s.io/kube-state-metrics/LICENSE | 201 - .../kube-state-metrics/pkg/collector/collector.go | 43 - .../k8s.io/kube-state-metrics/pkg/metric/family.go | 38 - .../kube-state-metrics/pkg/metric/generator.go | 102 - .../k8s.io/kube-state-metrics/pkg/metric/metric.go | 124 - .../pkg/metrics_store/metrics_store.go | 165 - cmd/bpa-operator/vendor/modules.txt | 537 - .../vendor/sigs.k8s.io/controller-runtime/LICENSE | 201 - .../controller-runtime/pkg/cache/cache.go | 121 - .../controller-runtime/pkg/cache/informer_cache.go | 178 - .../pkg/cache/internal/cache_reader.go | 185 - .../pkg/cache/internal/deleg_map.go | 96 - .../pkg/cache/internal/informers_map.go | 281 - .../pkg/client/apiutil/apimachinery.go | 88 - .../controller-runtime/pkg/client/client.go | 162 - .../controller-runtime/pkg/client/client_cache.go | 145 - .../controller-runtime/pkg/client/config/config.go | 95 - .../controller-runtime/pkg/client/config/doc.go | 18 - .../controller-runtime/pkg/client/fake/client.go | 217 - .../controller-runtime/pkg/client/fake/doc.go | 27 - .../controller-runtime/pkg/client/interfaces.go | 292 - .../controller-runtime/pkg/client/split.go | 59 - .../controller-runtime/pkg/client/typed_client.go | 132 - .../pkg/client/unstructured_client.go | 162 - .../pkg/controller/controller.go | 94 - .../controller-runtime/pkg/controller/doc.go | 25 - .../controller-runtime/pkg/event/doc.go | 25 - .../controller-runtime/pkg/event/event.go | 73 - .../controller-runtime/pkg/handler/doc.go | 36 - .../controller-runtime/pkg/handler/enqueue.go | 91 - .../pkg/handler/enqueue_mapped.go | 94 - .../pkg/handler/enqueue_owner.go | 165 - .../controller-runtime/pkg/handler/eventhandler.go | 104 - .../pkg/internal/controller/controller.go | 252 - .../pkg/internal/controller/metrics/metrics.go | 59 - .../pkg/internal/objectutil/filter.go | 42 - .../pkg/internal/recorder/recorder.go | 61 - .../controller-runtime/pkg/leaderelection/doc.go | 20 - .../pkg/leaderelection/leader_election.go | 109 - .../controller-runtime/pkg/manager/doc.go | 21 - .../controller-runtime/pkg/manager/internal.go | 291 - .../controller-runtime/pkg/manager/manager.go | 294 - .../controller-runtime/pkg/manager/testutil.go | 25 - .../pkg/metrics/client_go_adapter.go | 299 - .../controller-runtime/pkg/metrics/doc.go | 20 - .../controller-runtime/pkg/metrics/listener.go | 47 - .../controller-runtime/pkg/metrics/registry.go | 23 - .../controller-runtime/pkg/patch/doc.go | 33 - .../controller-runtime/pkg/patch/patch.go | 45 - .../controller-runtime/pkg/predicate/doc.go | 20 - .../controller-runtime/pkg/predicate/predicate.go | 118 - .../controller-runtime/pkg/reconcile/doc.go | 21 - .../controller-runtime/pkg/reconcile/reconcile.go | 92 - .../controller-runtime/pkg/recorder/recorder.go | 27 - .../controller-runtime/pkg/runtime/inject/doc.go | 22 - .../pkg/runtime/inject/inject.go | 131 - .../controller-runtime/pkg/runtime/log/deleg.go | 126 - .../pkg/runtime/log/kube_helpers.go | 129 - .../controller-runtime/pkg/runtime/log/log.go | 85 - .../controller-runtime/pkg/runtime/log/null.go | 60 - .../pkg/runtime/scheme/scheme.go | 56 - .../controller-runtime/pkg/runtime/signals/doc.go | 18 - .../pkg/runtime/signals/signal.go | 43 - .../pkg/runtime/signals/signal_posix.go | 26 - 
.../pkg/runtime/signals/signal_windows.go | 23 - .../controller-runtime/pkg/source/doc.go | 22 - .../pkg/source/internal/eventsource.go | 177 - .../controller-runtime/pkg/source/source.go | 281 - .../pkg/webhook/admission/decode.go | 48 - .../pkg/webhook/admission/doc.go | 101 - .../pkg/webhook/admission/http.go | 115 - .../pkg/webhook/admission/response.go | 70 - .../pkg/webhook/admission/types/types.go | 44 - .../pkg/webhook/admission/webhook.go | 259 - .../pkg/webhook/internal/metrics/metrics.go | 51 - .../pkg/webhook/types/webhook.go | 28 - .../vendor/sigs.k8s.io/controller-tools/LICENSE | 201 - .../pkg/crd/generator/generator.go | 217 - .../controller-tools/pkg/crd/util/util.go | 130 - .../pkg/internal/codegen/parse/apis.go | 287 - .../pkg/internal/codegen/parse/context.go | 42 - .../pkg/internal/codegen/parse/crd.go | 639 - .../pkg/internal/codegen/parse/index.go | 161 - .../pkg/internal/codegen/parse/parser.go | 151 - .../pkg/internal/codegen/parse/util.go | 539 - .../controller-tools/pkg/internal/codegen/types.go | 213 - .../controller-tools/pkg/internal/general/util.go | 102 - .../sigs.k8s.io/controller-tools/pkg/util/util.go | 77 - .../vendor/sigs.k8s.io/yaml/.gitignore | 20 - .../vendor/sigs.k8s.io/yaml/.travis.yml | 14 - .../vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 - cmd/bpa-operator/vendor/sigs.k8s.io/yaml/LICENSE | 50 - cmd/bpa-operator/vendor/sigs.k8s.io/yaml/OWNERS | 25 - cmd/bpa-operator/vendor/sigs.k8s.io/yaml/README.md | 121 - .../vendor/sigs.k8s.io/yaml/RELEASE.md | 9 - .../vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 - .../vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 - cmd/bpa-operator/vendor/sigs.k8s.io/yaml/fields.go | 502 - cmd/bpa-operator/vendor/sigs.k8s.io/yaml/yaml.go | 319 - .../vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 - cmd/bpa-operator/version/version.go | 5 - 2861 files changed, 899999 deletions(-) delete mode 100644 cmd/bpa-operator/.gitignore delete mode 100644 cmd/bpa-operator/Makefile delete mode 100755 cmd/bpa-operator/bpa_operator_launch.sh delete mode 100644 cmd/bpa-operator/build/Dockerfile delete mode 100755 cmd/bpa-operator/build/bin/entrypoint delete mode 100755 cmd/bpa-operator/build/bin/user_setup delete mode 100644 cmd/bpa-operator/cmd/manager/main.go delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_2.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_multiple_3.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_multiple_1.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_multiple_2.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single_2.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single_3.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_multiple.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_single.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_crd.yaml delete mode 100644 
cmd/bpa-operator/deploy/crds/provisioning-crd/provisioning_cr_kud_plugins_sample.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/software-crd/bpa_v1alpha1_software_cr.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/software-crd/bpa_v1alpha1_software_crd.yaml delete mode 100644 cmd/bpa-operator/deploy/crds/software-crd/test_bpa_v1alpha1_software_cr.yaml delete mode 100644 cmd/bpa-operator/deploy/kud-installer.yaml delete mode 100644 cmd/bpa-operator/deploy/netattachdef-flannel-vm.yaml delete mode 100644 cmd/bpa-operator/deploy/operator.yaml delete mode 100644 cmd/bpa-operator/deploy/role.yaml delete mode 100644 cmd/bpa-operator/deploy/role_binding.yaml delete mode 100644 cmd/bpa-operator/deploy/service_account.yaml delete mode 100644 cmd/bpa-operator/deploy/virtlet-deployment-sample.yaml delete mode 100755 cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh delete mode 100644 cmd/bpa-operator/e2etest/bpa_remote_compute_cr_sample.yaml delete mode 100644 cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr.yaml delete mode 100644 cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr_multi.yaml delete mode 100755 cmd/bpa-operator/e2etest/bpa_remote_virtletvm_verifier.sh delete mode 100644 cmd/bpa-operator/e2etest/e2e_virtletvm_test_provisioning_cr.yaml delete mode 100644 cmd/bpa-operator/e2etest/test_bmh_provisioning_cr.yaml delete mode 100644 cmd/bpa-operator/go.mod delete mode 100644 cmd/bpa-operator/go.sum delete mode 100644 cmd/bpa-operator/pkg/apis/addtoscheme_bpa_v1alpha1.go delete mode 100644 cmd/bpa-operator/pkg/apis/apis.go delete mode 100644 cmd/bpa-operator/pkg/apis/bpa/group.go delete mode 100644 cmd/bpa-operator/pkg/apis/bpa/v1alpha1/doc.go delete mode 100644 cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go delete mode 100644 cmd/bpa-operator/pkg/apis/bpa/v1alpha1/register.go delete mode 100644 cmd/bpa-operator/pkg/apis/bpa/v1alpha1/software_types.go delete mode 100644 cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.openapi.go delete mode 100644 cmd/bpa-operator/pkg/controller/add_provisioning.go delete mode 100644 cmd/bpa-operator/pkg/controller/controller.go delete mode 100644 cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go delete mode 100644 cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller_test.go delete mode 100644 cmd/bpa-operator/tools.go delete mode 100644 cmd/bpa-operator/vendor/cloud.google.com/go/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/cloud.google.com/go/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/cloud.google.com/go/LICENSE delete mode 100644 cmd/bpa-operator/vendor/cloud.google.com/go/compute/metadata/metadata.go delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/go.mod delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/go.sum delete mode 100644 
cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go delete mode 100644 cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/config.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/token.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/version.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/authorization.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/autorest.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/async.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/client.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/date.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/time.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/utility.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/error.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/preparer.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/responder.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/sender.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/utility.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/version.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/Azure/go-autorest/logger/logger.go delete mode 100644 cmd/bpa-operator/vendor/github.com/Azure/go-autorest/tracing/tracing.go delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/purell.go delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/urlesc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/jsonpatch.go delete mode 100644 cmd/bpa-operator/vendor/github.com/beorn7/perks/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/beorn7/perks/quantile/exampledata.txt delete mode 100644 cmd/bpa-operator/vendor/github.com/beorn7/perks/quantile/stream.go delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/NOTICE delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/register.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/crd_kinds.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/doc.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/openapi_generated.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/register.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/types.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/scheme/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/scheme/register.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go delete mode 100644 cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/bypass.go delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/common.go delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/config.go delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/dump.go delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/format.go delete mode 100644 cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/spew.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/claims.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/ecdsa.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/errors.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/hmac.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/map_claims.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/none.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/parser.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/signing_method.go delete mode 100644 cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/token.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/CHANGES.md delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/Makefile delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/Srcfile delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/bench_test.sh delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compress.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressor_cache.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressor_pools.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressors.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/constants.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/container.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/cors_filter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/coverage.sh delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/curly.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/curly_route.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/entity_accessors.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/filter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/json.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/jsoniter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/jsr311.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/log/log.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/logger.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/mime.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/options_filter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/parameter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/path_expression.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/path_processor.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/request.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/response.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/route.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/emicklei/go-restful/route_builder.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/router.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/service_error.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/web_service.go delete mode 100644 cmd/bpa-operator/vendor/github.com/emicklei/go-restful/web_service_container.go delete mode 100644 cmd/bpa-operator/vendor/github.com/evanphx/json-patch/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/evanphx/json-patch/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/evanphx/json-patch/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/evanphx/json-patch/merge.go delete mode 100644 cmd/bpa-operator/vendor/github.com/evanphx/json-patch/patch.go delete mode 100644 cmd/bpa-operator/vendor/github.com/ghodss/yaml/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/ghodss/yaml/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/ghodss/yaml/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/ghodss/yaml/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/ghodss/yaml/fields.go delete mode 100644 cmd/bpa-operator/vendor/github.com/ghodss/yaml/yaml.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/logr/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/logr/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/logr/logr.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/zapr/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/zapr/Gopkg.lock delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/zapr/Gopkg.toml delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/zapr/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/zapr/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-logr/zapr/zapr.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.editorconfig delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/pointer.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/reference.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/go-openapi/spec/.editorconfig delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/.golangci.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/bindata.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/cache.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/contact_info.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/debug.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/expander.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/external_docs.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/header.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/info.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/items.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/license.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/normalizer.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/operation.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/parameter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/path_item.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/paths.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/ref.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/response.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/responses.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/schema.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/schema_loader.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/security_scheme.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/spec.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/swagger.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/tag.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/unused.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/spec/xml_object.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/.editorconfig delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/.golangci.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/convert.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/go-openapi/swag/convert_types.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/json.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/loading.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/net.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/path.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/post_go18.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/post_go19.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/pre_go18.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/pre_go19.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/util.go delete mode 100644 cmd/bpa-operator/vendor/github.com/go-openapi/swag/yaml.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.env delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.gometalinter.json delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/LICENSE.txt delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/Makefile delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/SHOULDERS.md delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/envy.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/gobuffalo/envy/version.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/Makefile delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/clone.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/custom_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/decode.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/deprecated.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/discard.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/duration.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/duration_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/encode.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/encode_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/equal.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/extensions.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/lib.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/lib_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/message_set.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/properties.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/properties_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/skip_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_marshal.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_merge.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text_parser.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/timestamp.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/wrappers.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/groupcache/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/groupcache/lru/lru.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/decode.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/deprecated.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/discard.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/extensions.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/properties.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/text.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/text_parser.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any/any.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/google/btree/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/google/btree/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/google/btree/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/google/btree/btree.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/btree/btree_mem.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/gofuzz/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/google/gofuzz/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/github.com/google/gofuzz/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/google/gofuzz/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/google/gofuzz/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/gofuzz/fuzz.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/gofuzz/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/.travis.yml delete mode 
100644 cmd/bpa-operator/vendor/github.com/google/uuid/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/dce.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/hash.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/marshal.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/node.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/node_js.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/node_net.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/sql.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/time.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/util.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/uuid.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/version1.go delete mode 100644 cmd/bpa-operator/vendor/github.com/google/uuid/version4.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/context.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/error.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/helpers.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/main.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/reader.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extension.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extensions.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.zuul.yaml delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/LICENSE delete mode 100644 
cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/auth_options.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/auth_result.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/endpoint_search.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/errors.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/client.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/errors.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/http.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/linked.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/marker.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/pager.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/single.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/params.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/provider_client.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/results.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/service_client.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/util.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gregjones/httpcache/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/gregjones/httpcache/LICENSE.txt delete mode 100644 cmd/bpa-operator/vendor/github.com/gregjones/httpcache/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go delete mode 100644 cmd/bpa-operator/vendor/github.com/gregjones/httpcache/httpcache.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go delete mode 100644 cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/2q.go delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/arc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/lru.go delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go delete mode 100644 cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/map.go delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/merge.go delete mode 100644 cmd/bpa-operator/vendor/github.com/imdario/mergo/mergo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/joho/godotenv/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/joho/godotenv/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/joho/godotenv/LICENCE delete mode 100644 cmd/bpa-operator/vendor/github.com/joho/godotenv/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/joho/godotenv/godotenv.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/.codecov.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/Gopkg.lock delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/Gopkg.toml delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/adapter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_array.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_bool.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_float.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_int32.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_int64.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_invalid.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_nil.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_number.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_object.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/json-iterator/go/any_str.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_uint32.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/any_uint64.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/build.sh delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/config.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_array.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_float.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_int.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_object.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip_sloppy.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip_strict.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_str.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/jsoniter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/pool.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_array.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_dynamic.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_extension.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_json_number.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_json_raw_message.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_map.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_marshaler.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_native.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_optional.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_slice.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_struct_decoder.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_struct_encoder.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/stream.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_float.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_int.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_str.go delete mode 100644 cmd/bpa-operator/vendor/github.com/json-iterator/go/test.sh delete mode 100644 cmd/bpa-operator/vendor/github.com/mailru/easyjson/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/mailru/easyjson/buffer/pool.go delete mode 100644 cmd/bpa-operator/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go delete mode 100644 cmd/bpa-operator/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go delete mode 100644 cmd/bpa-operator/vendor/github.com/mailru/easyjson/jlexer/error.go delete mode 100644 cmd/bpa-operator/vendor/github.com/mailru/easyjson/jlexer/lexer.go 
delete mode 100644 cmd/bpa-operator/vendor/github.com/mailru/easyjson/jwriter/writer.go delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/.gometalinter.json delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/.hgignore delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/LICENCE delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/Makefile delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/helpers.go delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/inflect.go delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/inflections.json delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/name.go delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/shoulders.md delete mode 100644 cmd/bpa-operator/vendor/github.com/markbates/inflect/version.go delete mode 100644 cmd/bpa-operator/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE delete mode 100644 cmd/bpa-operator/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile delete mode 100644 cmd/bpa-operator/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go delete mode 100644 cmd/bpa-operator/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/addtoscheme_metal3_v1alpha1.go delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/apis.go delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/baremetalhost_types.go delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/register.go delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/zz_generated.defaults.go delete mode 100644 cmd/bpa-operator/vendor/github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1/zz_generated.openapi.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/README.md delete mode 100644 
cmd/bpa-operator/vendor/github.com/modern-go/concurrent/executor.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/go_above_19.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/go_below_19.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/log.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/test.sh delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/concurrent/unbounded_executor.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/Gopkg.lock delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/Gopkg.toml delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/go_above_17.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/go_above_19.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/go_below_17.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/go_below_19.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/reflect2.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/reflect2_amd64.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/reflect2_kind.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/relfect2_386.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/relfect2_arm.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/relfect2_arm64.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/relfect2_s390x.s delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/safe_field.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/safe_map.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/safe_slice.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/safe_struct.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/safe_type.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/test.sh delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/type_map.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_array.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_eface.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_field.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_iface.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_link.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_map.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_ptr.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_slice.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_struct.go delete mode 100644 cmd/bpa-operator/vendor/github.com/modern-go/reflect2/unsafe_type.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/constants.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/k8sutil/k8sutil.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/kube-metrics/collector.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/kube-metrics/metrics.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/kube-metrics/server.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/kube-metrics/uclient.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/leader/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/leader/leader.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/flags.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/log/zap/logger.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/metrics/metrics.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/metrics/service-monitor.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/pkg/restmapper/dynamicrestmapper.go delete mode 100644 cmd/bpa-operator/vendor/github.com/operator-framework/operator-sdk/version/version.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/dce.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/hash.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/marshal.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/node.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/sql.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/time.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/util.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/uuid.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/version1.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pborman/uuid/version4.go delete mode 100644 cmd/bpa-operator/vendor/github.com/peterbourgon/diskv/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/peterbourgon/diskv/README.md delete mode 100644 
cmd/bpa-operator/vendor/github.com/peterbourgon/diskv/compression.go delete mode 100644 cmd/bpa-operator/vendor/github.com/peterbourgon/diskv/diskv.go delete mode 100644 cmd/bpa-operator/vendor/github.com/peterbourgon/diskv/index.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pkg/errors/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/pkg/errors/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/pkg/errors/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/pkg/errors/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/pkg/errors/appveyor.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/pkg/errors/errors.go delete mode 100644 cmd/bpa-operator/vendor/github.com/pkg/errors/stack.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/NOTICE delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/collector.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/counter.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/desc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/fnv.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/gauge.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/histogram.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/http.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/labels.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/metric.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/observer.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/registry.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/summary.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/timer.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/untyped.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/value.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/vec.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_golang/prometheus/wrap.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_model/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_model/NOTICE delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/client_model/go/metrics.pb.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/NOTICE delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/expfmt/decode.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/expfmt/encode.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/expfmt/expfmt.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/expfmt/fuzz.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/expfmt/text_create.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/expfmt/text_parse.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/alert.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/fingerprinting.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/fnv.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/labels.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/labelset.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/metric.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/model.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/signature.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/silence.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/time.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/common/model/value.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/MAINTAINERS.md delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/Makefile delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/Makefile.common delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/NOTICE delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/README.md delete mode 100644 
cmd/bpa-operator/vendor/github.com/prometheus/procfs/buddyinfo.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/doc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/fixtures.ttar delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/fs.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/ipvs.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/mdstat.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/mountstats.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/net_dev.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/proc.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/proc_io.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/proc_limits.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/proc_ns.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/proc_psi.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/proc_stat.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/stat.go delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/ttar delete mode 100644 cmd/bpa-operator/vendor/github.com/prometheus/procfs/xfrm.go delete mode 100644 cmd/bpa-operator/vendor/github.com/rogpeppe/go-internal/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/rogpeppe/go-internal/modfile/gopkgin.go delete mode 100644 cmd/bpa-operator/vendor/github.com/rogpeppe/go-internal/modfile/print.go delete mode 100644 cmd/bpa-operator/vendor/github.com/rogpeppe/go-internal/modfile/read.go delete mode 100644 cmd/bpa-operator/vendor/github.com/rogpeppe/go-internal/modfile/rule.go delete mode 100644 cmd/bpa-operator/vendor/github.com/rogpeppe/go-internal/module/module.go delete mode 100644 cmd/bpa-operator/vendor/github.com/rogpeppe/go-internal/semver/semver.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/LICENSE.txt delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/afero.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/appveyor.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/basepath.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/cacheOnReadFs.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/const_bsds.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/const_win_unix.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/copyOnWriteFs.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/go.mod delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/go.sum delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/httpFs.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/ioutil.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/lstater.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/match.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/mem/dir.go delete mode 100644 
cmd/bpa-operator/vendor/github.com/spf13/afero/mem/dirmap.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/mem/file.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/memmap.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/os.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/path.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/readonlyfs.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/regexpfs.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/unionFile.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/afero/util.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/.gitignore delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/LICENSE delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/README.md delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/bool.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/bool_slice.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/bytes.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/count.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/duration.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/duration_slice.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/flag.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/float32.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/float64.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/golangflag.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/int.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/int16.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/int32.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/int64.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/int8.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/int_slice.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/ip.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/ip_slice.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/ipmask.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/ipnet.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/string.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/string_array.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/string_slice.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/string_to_int.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/string_to_string.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/uint.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/uint16.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/uint32.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/uint64.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/uint8.go delete mode 100644 cmd/bpa-operator/vendor/github.com/spf13/pflag/uint_slice.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/.gitignore delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/.travis.yml delete mode 100644 
cmd/bpa-operator/vendor/go.opencensus.io/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/Gopkg.lock delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/Gopkg.toml delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/LICENSE delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/Makefile delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/README.md delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/appveyor.yml delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/go.mod delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/go.sum delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/internal/internal.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/internal/sanitize.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/internal/traceinternals.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricdata/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricdata/exemplar.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricdata/label.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricdata/metric.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricdata/point.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricdata/type_string.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricdata/unit.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricproducer/manager.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/metric/metricproducer/producer.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/opencensus.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/client.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/client_stats_handler.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/server.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/server_stats_handler.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ocgrpc/trace_common.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/client.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/client_stats.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/route.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/server.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/stats.go delete mode 100644 
cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/trace.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/resource/resource.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/internal/record.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/measure.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/measure_float64.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/measure_int64.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/record.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/units.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/aggregation.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/aggregation_data.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/collector.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/export.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/view.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/view_to_metric.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/worker.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/stats/view/worker_commands.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/tag/context.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/tag/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/tag/key.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/tag/map.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/tag/map_codec.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/tag/profile_19.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/tag/profile_not19.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/tag/validate.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/basetypes.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/config.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/evictedqueue.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/export.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/internal/internal.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/lrumap.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/propagation/propagation.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/sampling.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/spanbucket.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/spanstore.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/status_codes.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/trace.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/trace_go11.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/trace_nongo11.go delete mode 100644 cmd/bpa-operator/vendor/go.opencensus.io/trace/tracestate/tracestate.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/.codecov.yml delete mode 100644 
cmd/bpa-operator/vendor/go.uber.org/atomic/.gitignore delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/LICENSE.txt delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/Makefile delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/README.md delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/atomic.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/glide.lock delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/glide.yaml delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/atomic/string.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/.codecov.yml delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/.gitignore delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/CHANGELOG.md delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/LICENSE.txt delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/Makefile delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/README.md delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/error.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/glide.lock delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/multierr/glide.yaml delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/.codecov.yml delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/.gitignore delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/.readme.tmpl delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/CHANGELOG.md delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/FAQ.md delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/LICENSE.txt delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/Makefile delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/README.md delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/array.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/buffer/buffer.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/buffer/pool.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/check_license.sh delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/config.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/encoder.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/error.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/field.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/flag.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/glide.lock delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/glide.yaml delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/global.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/http_handler.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/internal/color/color.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/internal/exit/exit.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/level.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/logger.go 
delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/options.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/sink.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/stacktrace.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/sugar.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/time.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/writer.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/console_encoder.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/core.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/doc.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/encoder.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/entry.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/error.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/field.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/hook.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/json_encoder.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/level.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/level_strings.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/marshaler.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/memory_encoder.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/sampler.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/tee.go delete mode 100644 cmd/bpa-operator/vendor/go.uber.org/zap/zapcore/write_syncer.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/LICENSE delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/PATENTS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/const_amd64.h delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/const_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/doc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/mul_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/curve25519/square_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/internal/chacha20/xor.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/internal/subtle/aliasing.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/internal/subtle/aliasing_appengine.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/mac_noasm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/poly1305.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_arm.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_generic.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_noasm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_s390x.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/buffer.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/certs.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/channel.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/cipher.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/client.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/client_auth.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/common.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/connection.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/doc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/handshake.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/kex.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/keys.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/mac.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/messages.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/mux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/server.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/session.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/streamlocal.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/tcpip.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/terminal/terminal.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/terminal/util.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/crypto/ssh/transport.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/LICENSE delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/PATENTS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/context/context.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/context/go17.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/context/go19.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http/httpguts/guts.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http/httpguts/httplex.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/.gitignore delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/Dockerfile delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/Makefile delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/README delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/ciphers.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/client_conn_pool.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/databuffer.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/errors.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/flow.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/frame.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/go111.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/gotrack.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/headermap.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/hpack/encode.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/hpack/hpack.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/hpack/huffman.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/hpack/tables.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/http2.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/not_go111.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/pipe.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/server.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/transport.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/write.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/writesched.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/writesched_priority.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/http2/writesched_random.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/idna/idna.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/idna/punycode.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/idna/tables.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/idna/trie.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/idna/trieval.go delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/net/internal/timeseries/timeseries.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/trace/events.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/trace/histogram.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/net/trace/trace.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/LICENSE delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/README.md delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/go.mod delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/go.sum delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/google/appengine.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/google/appengine_gen1.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/google/default.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/google/doc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/google/google.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/google/jwt.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/google/sdk.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/internal/client_appengine.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/internal/doc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/internal/oauth2.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/internal/token.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/internal/transport.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/jws/jws.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/jwt/jwt.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/oauth2.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/token.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/oauth2/transport.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sync/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sync/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sync/LICENSE delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sync/PATENTS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sync/semaphore/semaphore.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/LICENSE delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/PATENTS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/byteorder.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_gccgo.c delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_gccgo.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_mips64x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_mipsx.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_s390x.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_wasm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_x86.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/cpu/cpu_x86.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/.gitignore delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/README.md delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/affinity_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/aliases.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_darwin_386.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_darwin_arm.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_freebsd_386.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_linux_386.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_linux_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_linux_arm.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_linux_arm64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_linux_s390x.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_netbsd_386.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_openbsd_386.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/sys/unix/bluetooth_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/cap_freebsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/constants.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dev_aix_ppc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dev_darwin.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dev_dragonfly.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dev_freebsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dev_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dev_netbsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dev_openbsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/dirent.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/endian_big.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/endian_little.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/env_unix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/errors_freebsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/fcntl.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/fcntl_darwin.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/gccgo.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/gccgo_c.c delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ioctl.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mkall.sh delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mkasm_darwin.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mkerrors.sh delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mkpost.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mksyscall.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mksyscall_solaris.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/mksysnum.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/openbsd_pledge.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/openbsd_unveil.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/pagesize_unix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/race.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/race0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/sockcmsg_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/sockcmsg_unix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/str.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall.go delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_aix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_bsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_darwin.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_darwin_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_dragonfly.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_freebsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_gc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_netbsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_openbsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_solaris.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_unix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_unix_gc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/timestruct.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/types_aix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/types_darwin.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/types_dragonfly.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/types_freebsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/types_netbsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/types_openbsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/types_solaris.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/xattr_bsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zptrace386_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zptracearm_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zptracemips_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go 
delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/aliases.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/asm_windows_386.s delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/sys/windows/asm_windows_amd64.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/asm_windows_arm.s delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/dll_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/env_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/eventlog.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/exec_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/memory_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/mksyscall.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/race.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/race0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/security_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/service.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/str.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/syscall.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/syscall_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/types_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/types_windows_386.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/types_windows_amd64.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/types_windows_arm.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/sys/windows/zsyscall_windows.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/LICENSE delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/PATENTS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/secure/bidirule/bidirule.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/transform/transform.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/bidi.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/bracket.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/core.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/gen.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/prop.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/bidi/trieval.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/composition.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/forminfo.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/input.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/iter.go delete mode 100644 
cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/maketables.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/normalize.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/readwriter.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/transform.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/trie.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/unicode/norm/triegen.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/gen.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/gen_common.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/gen_trieval.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/kind_string.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/tables10.0.0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/tables9.0.0.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/transform.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/trieval.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/text/width/width.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/time/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/time/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/time/LICENSE delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/time/PATENTS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/time/rate/rate.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/LICENSE delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/PATENTS delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/ast/astutil/imports.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/ast/astutil/util.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/gcexportdata/importer.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/gcexportdata/main.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go delete mode 
100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/packages/doc.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/packages/external.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/packages/golist.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/packages/golist_overlay.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/packages/packages.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/go/packages/visit.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/imports/fix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/imports/imports.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/imports/mkindex.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/imports/mkstdlib.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/imports/mod.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/imports/sortimports.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/imports/zstdlib.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/gopathwalk/walk.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/module/module.go delete mode 100644 cmd/bpa-operator/vendor/golang.org/x/tools/internal/semver/semver.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/api/AUTHORS delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/api/CONTRIBUTORS delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/api/LICENSE delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/api/support/bundler/bundler.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/LICENSE delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/README.md delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/appengine.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/appengine_vm.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/errors.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/go.mod delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/go.sum delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/identity.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/api.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/api_classic.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/api_common.go delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/app_id.go 
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/base/api_base.proto
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/identity.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/identity_classic.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/identity_flex.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/identity_vm.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/internal.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/log/log_service.proto
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/main.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/main_common.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/main_vm.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/metadata.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/net.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/regen.sh
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/transaction.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/namespace.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/timeout.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/travis_install.sh
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/travis_test.sh
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/genproto/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/.travis.yml
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/AUTHORS
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/CONTRIBUTING.md
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/Makefile
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/README.md
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/backoff.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/balancer.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/balancer/balancer.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/balancer/base/balancer.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/balancer/base/base.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/call.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/clientconn.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/codec.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/codegen.sh
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/codes/code_string.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/codes/codes.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/connectivity/connectivity.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/credentials/credentials.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/credentials/tls13.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/dialoptions.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/encoding/encoding.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/encoding/proto/proto.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/go.mod
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/go.sum
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/grpclog/grpclog.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/grpclog/logger.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/grpclog/loggerv2.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/install_gae.sh
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/interceptor.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/backoff/backoff.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/binarylog/sink.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/binarylog/util.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/channelz/funcs.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/channelz/types.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/grpcsync/event.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/internal.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/defaults.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/handler_server.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/http2_client.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/http2_server.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/http_util.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/log.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/internal/transport/transport.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/keepalive/keepalive.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/metadata/metadata.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/naming/dns_resolver.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/naming/naming.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/peer/peer.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/picker_wrapper.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/pickfirst.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/proxy.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/resolver/resolver.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/rpc_util.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/server.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/service_config.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/stats/handlers.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/stats/stats.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/status/status.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/stream.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/tap/tap.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/trace.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/version.go
 delete mode 100644 cmd/bpa-operator/vendor/google.golang.org/grpc/vet.sh
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/inf.v0/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/inf.v0/dec.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/inf.v0/rounder.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/.gitignore
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/.travis.yml
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/Makefile
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/README.md
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/error.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/file.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/ini.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/key.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/parser.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/section.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/ini.v1/struct.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/.travis.yml
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/NOTICE
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/README.md
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/apic.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/decode.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/emitterc.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/encode.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/go.mod
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/parserc.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/readerc.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/resolve.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/scannerc.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/sorter.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/writerc.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/yaml.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/yamlh.go
 delete mode 100644 cmd/bpa-operator/vendor/gopkg.in/yaml.v2/yamlprivateh.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admission/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admission/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admission/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admission/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admission/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta2/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta2/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta2/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta2/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/auditregistration/v1alpha1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/auditregistration/v1alpha1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta2/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta2/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta2/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v2alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v2alpha1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v2alpha1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v2alpha1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/certificates/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/certificates/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/certificates/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/certificates/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/coordination/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/coordination/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/coordination/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/coordination/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/annotation_key_constants.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/objectreference.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/resource.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/taint.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/toleration.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/events/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/events/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/events/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/events/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/events/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/extensions/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/extensions/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/extensions/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/extensions/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/networking/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/networking/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/networking/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/networking/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/networking/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/policy/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/policy/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/policy/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/policy/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1alpha1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1alpha1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1alpha1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1alpha1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/settings/v1alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/settings/v1alpha1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/settings/v1alpha1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/settings/v1alpha1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1alpha1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1alpha1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1alpha1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/helpers.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types_jsonschema.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/defaults.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types_jsonschema.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.conversion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/zz_generated.defaults.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/help.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/conversion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/conversion/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/conversion/helper.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/fields/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/fields/fields.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/fields/requirements.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/fields/selector.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/labels/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/labels/labels.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/labels/selector.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/codec.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/error.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/extension.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/helper.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/selection/operator.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/types/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/types/nodename.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/types/patch.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/types/uid.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/cache/cache.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/json/json.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/net/http.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/net/interface.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/net/util.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/sets/int.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/sets/string.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/uuid/uuid.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/version/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/version/helpers.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/version/types.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/watch/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/watch/filter.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/watch/mux.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/watch/watch.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/LICENSE
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/discovery/cached_discovery.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/discovery/discovery_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/discovery/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/discovery/fake/discovery.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/discovery/helper.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/discovery/round_tripper.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/dynamic/fake/simple.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/dynamic/interface.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/dynamic/scheme.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/dynamic/simple.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/clientset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/fake/clientset_generated.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/fake/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/import.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/scheme/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/scheme/register.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_admissionregistration_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_initializerconfiguration.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_admissionregistration_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_mutatingwebhookconfiguration.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/fake/fake_validatingwebhookconfiguration.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_apps_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_controllerrevision.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_daemonset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_deployment.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_replicaset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/fake/fake_statefulset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_apps_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_controllerrevision.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_deployment.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake/fake_statefulset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_apps_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_controllerrevision.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_daemonset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_deployment.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_replicaset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake/fake_statefulset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditregistration_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/fake/fake_auditsink.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_authentication_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/fake/fake_tokenreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_authentication_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake/fake_tokenreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_authorization_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_localsubjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_selfsubjectrulesreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/fake/fake_subjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_authorization_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_localsubjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_selfsubjectrulesreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake/fake_subjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_autoscaling_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake/fake_horizontalpodautoscaler.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_autoscaling_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/fake/fake_horizontalpodautoscaler.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_autoscaling_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/fake/fake_horizontalpodautoscaler.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_batch_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/fake/fake_job.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_batch_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/fake/fake_cronjob.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/doc.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_batch_client.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake/fake_cronjob.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go
 delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go
 delete mode
100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificates_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake/fake_certificatesigningrequest_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_coordination_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/fake/fake_lease.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_componentstatus.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_configmap.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_core_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_endpoints.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_event_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_limitrange.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_namespace_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_node_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolume.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_persistentvolumeclaim.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_podtemplate.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_replicationcontroller.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_resourcequota.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_secret.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_service_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_serviceaccount_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_event.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/fake/fake_events_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_daemonset.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_deployment_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_extensions_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_ingress.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_podsecuritypolicy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake/fake_replicaset.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networking_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/fake/fake_networkpolicy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_poddisruptionbudget.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_podsecuritypolicy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_policy_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrole.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_clusterrolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rbac_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_role.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/fake/fake_rolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrole.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_clusterrolebinding.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rbac_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_role.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake/fake_rolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrole.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_clusterrolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rbac_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_role.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake/fake_rolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_priorityclass.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake/fake_scheduling_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_priorityclass.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/fake/fake_scheduling_client.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_podpreset.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake/fake_settings_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storage_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_storageclass.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/fake/fake_volumeattachment.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_storage_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/fake/fake_volumeattachment.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storage_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_storageclass.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattachment.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/version/.gitattributes delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/version/base.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/version/def.bzl delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/version/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/pkg/version/version.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/README.md delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/oidc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/client.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/config.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/plugin.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/request.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/transport.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/url_utils.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/urlbackoff.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/watch/decoder.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/watch/encoder.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/restmapper/category_expansion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/restmapper/discovery.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/restmapper/shortcut.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/testing/actions.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/testing/fake.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/testing/fixture.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/auth/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/auth/clientauth.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/controller.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/delta_fifo.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/expiration_cache.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/fifo.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/heap.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/index.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/listers.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/listwatch.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/mutation_cache.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/mutation_detector.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/reflector.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/shared_informer.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/store.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/cache/undelta_store.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/register.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/types.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/client_config.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/config.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/flag.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/helpers.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/loader.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/overrides.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/clientcmd/validation.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/leaderelection/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/metrics/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/metrics/metrics.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/pager/pager.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/record/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/record/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/record/event.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/record/events_cache.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/record/fake.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/tools/reference/ref.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/client-go/transport/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/transport/cache.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/transport/config.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/transport/round_trippers.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/transport/token_source.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/transport/transport.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/buffer/ring_growing.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/cert/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/cert/cert.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/cert/csr.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/cert/io.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/cert/pem.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/connrotation/connrotation.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/flowcontrol/backoff.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/flowcontrol/throttle.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/homedir/homedir.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/integer/integer.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/jsonpath/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/jsonpath/node.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/jsonpath/parser.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/retry/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/retry/util.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/workqueue/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/workqueue/metrics.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/workqueue/parallelizer.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/workqueue/queue.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/client-go/util/workqueue/rate_limitting_queue.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/LICENSE delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/args/args.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/generator/default_generator.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/generator/default_package.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/generator/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/generator/error_tracker.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/generator/execute.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/generator/generator.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/generator/import_tracker.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/generator/snippet_writer.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/namer/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/namer/import_tracker.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/namer/namer.go delete mode 100644 
cmd/bpa-operator/vendor/k8s.io/gengo/namer/order.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/namer/plural_namer.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/parser/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/parser/parse.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/types/comments.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/types/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/types/flatten.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/gengo/types/types.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/LICENSE delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/README.md delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/RELEASE.md delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/SECURITY_CONTACTS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/code-of-conduct.md delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/klog.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/klog/klog_file.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-openapi/LICENSE delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-openapi/pkg/common/common.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-openapi/pkg/common/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-state-metrics/LICENSE delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-state-metrics/pkg/collector/collector.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-state-metrics/pkg/metric/family.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-state-metrics/pkg/metric/generator.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-state-metrics/pkg/metric/metric.go delete mode 100644 cmd/bpa-operator/vendor/k8s.io/kube-state-metrics/pkg/metrics_store/metrics_store.go delete mode 100644 cmd/bpa-operator/vendor/modules.txt delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/LICENSE delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/config.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/config/doc.go delete mode 100644 
cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controller.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_mapped.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/enqueue_owner.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/handler/eventhandler.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics/metrics.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/filter.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/internal/recorder/recorder.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/leaderelection/leader_election.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/internal.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/manager.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/manager/testutil.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/client_go_adapter.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/listener.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/metrics/registry.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/patch/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/patch/patch.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go delete mode 100644 
cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/recorder/recorder.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/deleg.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/kube_helpers.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/log.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/log/null.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/scheme/scheme.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal_posix.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/signals/signal_windows.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/source/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/source/internal/eventsource.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/source/source.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/decode.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/doc.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/http.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/response.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/types/types.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/webhook.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics/metrics.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/types/webhook.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/LICENSE delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/crd/generator/generator.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/crd/util/util.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/apis.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/context.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/crd.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/index.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/parser.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/parse/util.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/internal/codegen/types.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/internal/general/util.go delete mode 100644 
cmd/bpa-operator/vendor/sigs.k8s.io/controller-tools/pkg/util/util.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/.gitignore delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/.travis.yml delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/LICENSE delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/OWNERS delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/README.md delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/RELEASE.md delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/code-of-conduct.md delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/fields.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/yaml.go delete mode 100644 cmd/bpa-operator/vendor/sigs.k8s.io/yaml/yaml_go110.go delete mode 100644 cmd/bpa-operator/version/version.go diff --git a/cmd/bpa-operator/.gitignore b/cmd/bpa-operator/.gitignore deleted file mode 100644 index 7c50470..0000000 --- a/cmd/bpa-operator/.gitignore +++ /dev/null @@ -1,77 +0,0 @@ -# Temporary Build Files -build/_output -build/_test -# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode -### Emacs ### -# -*- mode: gitignore; -*- -*~ -\#*\# -/.emacs.desktop -/.emacs.desktop.lock -*.elc -auto-save-list -tramp -.\#* -# Org-mode -.org-id-locations -*_archive -# flymake-mode -*_flymake.* -# eshell files -/eshell/history -/eshell/lastdir -# elpa packages -/elpa/ -# reftex files -*.rel -# AUCTeX auto folder -/auto/ -# cask packages -.cask/ -dist/ -# Flycheck -flycheck_*.el -# server auth directory -/server/ -# projectiles files -.projectile -projectile-bookmarks.eld -# directory configuration -.dir-locals.el -# saveplace -places -# url cache -url/cache/ -# cedet -ede-projects.el -# smex -smex-items -# company-statistics -company-statistics-cache.el -# anaconda-mode -anaconda-mode/ -### Go ### -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib -# Test binary, build with 'go test -c' -*.test -# Output of the go coverage tool, specifically when used with LiteIDE -*.out -### Vim ### -# swap -.sw[a-p] -.*.sw[a-p] -# session -Session.vim -# temporary -.netrwhist -# auto-generated tag files -tags -### VisualStudioCode ### -.vscode/* -.history -# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode diff --git a/cmd/bpa-operator/Makefile b/cmd/bpa-operator/Makefile deleted file mode 100644 index 1f24da4..0000000 --- a/cmd/bpa-operator/Makefile +++ /dev/null @@ -1,67 +0,0 @@ -.PHONY: build -build: - go build -o build/_output/bin/bpa-operator cmd/manager/main.go - -docker: - docker build --rm -t akraino.org/icn/bpa-operator:latest . -f build/Dockerfile - git clone https://github.com/onap/multicloud-k8s.git - cd multicloud-k8s && \ - docker build --network=host --rm \ - --build-arg http_proxy=${http_proxy} \ - --build-arg HTTP_PROXY=${HTTP_PROXY} \ - --build-arg https_proxy=${https_proxy} \ - --build-arg HTTPS_PROXY=${HTTPS_PROXY} \ - --build-arg no_proxy=${no_proxy} \ - --build-arg NO_PROXY=${NO_PROXY} \ - -t github.com/onap/multicloud-k8s:latest . -f kud/build/Dockerfile - rm -rf multicloud-k8s - -docker_e2e: - docker build --rm -t akraino.org/icn/bpa-operator:latest . 
-f build/Dockerfile - git clone https://github.com/onap/multicloud-k8s.git - cd multicloud-k8s && \ - docker build --network=host --rm \ - --build-arg http_proxy=${http_proxy} \ - --build-arg HTTP_PROXY=${HTTP_PROXY} \ - --build-arg https_proxy=${https_proxy} \ - --build-arg HTTPS_PROXY=${HTTPS_PROXY} \ - --build-arg no_proxy=${no_proxy} \ - --build-arg NO_PROXY=${NO_PROXY} \ - --build-arg KUD_ENABLE_TESTS=true \ - -t github.com/onap/multicloud-k8s:latest . -f kud/build/Dockerfile - rm -rf multicloud-k8s - -.PHONY: deploy -deploy: - kubectl apply -f deploy/service_account.yaml - kubectl apply -f deploy/role.yaml - kubectl apply -f deploy/role_binding.yaml - kubectl apply -f deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_crd.yaml - kubectl apply -f deploy/crds/software-crd/bpa_v1alpha1_software_crd.yaml - kubectl apply -f deploy/kud-installer.yaml - kubectl apply -f deploy/operator.yaml - kubectl create secret generic ssh-key-secret --from-file=id_rsa=/root/.ssh/id_rsa --from-file=id_rsa.pub=/root/.ssh/id_rsa.pub - -.PHONY: delete -delete: - kubectl delete -f deploy/service_account.yaml - kubectl delete -f deploy/role.yaml - kubectl delete -f deploy/role_binding.yaml - kubectl delete -f deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_crd.yaml - kubectl delete -f deploy/crds/software-crd/bpa_v1alpha1_software_crd.yaml - kubectl delete -f deploy/kud-installer.yaml - kubectl delete -f deploy/operator.yaml - kubectl delete secret ssh-key-secret - -.PHONY: unit_test -unit_test: - go test ./pkg/controller/provisioning/ - -.PHONY: e2etest_bmh -e2etest_bmh: - ./e2etest/bpa_bmh_verifier.sh provision - ./e2etest/bpa_bmh_verifier.sh teardown - -.PHONY: provision -provision: - ./e2etest/bpa_bmh_verifier.sh provision diff --git a/cmd/bpa-operator/bpa_operator_launch.sh b/cmd/bpa-operator/bpa_operator_launch.sh deleted file mode 100755 index ae118d7..0000000 --- a/cmd/bpa-operator/bpa_operator_launch.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -eu -o pipefail - -#Get Go ENV variables -eval "$(go env)" - -export GO111MODULE=on -go get -d github.com/operator-framework/operator-sdk # This will download the git repository and not install it -pushd $GOPATH/src/github.com/operator-framework/operator-sdk -git checkout master -make tidy -make install -popd - -#Copy bpa operator directory to the right path -kubectl create -f $PWD/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_crd.yaml -kubectl create -f $PWD/deploy/crds/software-crd/bpa_v1alpha1_software_crd.yaml -echo $GOPATH -mkdir -p $GOPATH/src/github.com/ && cp -r $PWD $GOPATH/src/github.com/bpa-operator -pushd $GOPATH/src/github.com/bpa-operator -operator-sdk up local --kubeconfig $HOME/.kube/config -popd diff --git a/cmd/bpa-operator/build/Dockerfile b/cmd/bpa-operator/build/Dockerfile deleted file mode 100644 index c4ed325..0000000 --- a/cmd/bpa-operator/build/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM golang:1.12 AS builder -WORKDIR /go/src/github.com/bpa-operator -COPY . . -RUN make build - -FROM golang:1.12 - -COPY --from=builder /go/src/github.com/bpa-operator/build/_output/bin/bpa-operator / diff --git a/cmd/bpa-operator/build/bin/entrypoint b/cmd/bpa-operator/build/bin/entrypoint deleted file mode 100755 index 4044594..0000000 --- a/cmd/bpa-operator/build/bin/entrypoint +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -e - -# This is documented here: -# https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines - -if ! 
whoami &>/dev/null; then - if [ -w /etc/passwd ]; then - echo "${USER_NAME:-bpa-operator}:x:$(id -u):$(id -g):${USER_NAME:-bpa-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd - fi -fi - -exec ${OPERATOR} $@ diff --git a/cmd/bpa-operator/build/bin/user_setup b/cmd/bpa-operator/build/bin/user_setup deleted file mode 100755 index 1e36064..0000000 --- a/cmd/bpa-operator/build/bin/user_setup +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/sh -set -x - -# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) -mkdir -p ${HOME} -chown ${USER_UID}:0 ${HOME} -chmod ug+rwx ${HOME} - -# runtime user will need to be able to self-insert in /etc/passwd -chmod g+rw /etc/passwd - -# no need for this script to remain in the image after running -rm $0 diff --git a/cmd/bpa-operator/cmd/manager/main.go b/cmd/bpa-operator/cmd/manager/main.go deleted file mode 100644 index 406803c..0000000 --- a/cmd/bpa-operator/cmd/manager/main.go +++ /dev/null @@ -1,162 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "os" - "runtime" - - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - _ "k8s.io/client-go/plugin/pkg/client/auth" - "k8s.io/client-go/rest" - - "github.com/bpa-operator/pkg/apis" - "github.com/bpa-operator/pkg/controller" - - "github.com/operator-framework/operator-sdk/pkg/k8sutil" - kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics" - "github.com/operator-framework/operator-sdk/pkg/leader" - "github.com/operator-framework/operator-sdk/pkg/log/zap" - "github.com/operator-framework/operator-sdk/pkg/metrics" - "github.com/operator-framework/operator-sdk/pkg/restmapper" - sdkVersion "github.com/operator-framework/operator-sdk/version" - "github.com/spf13/pflag" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/manager" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" - "sigs.k8s.io/controller-runtime/pkg/runtime/signals" -) - -// Change below variables to serve metrics on different host or port. -var ( - metricsHost = "0.0.0.0" - metricsPort int32 = 8383 - operatorMetricsPort int32 = 8686 -) -var log = logf.Log.WithName("cmd") - -func printVersion() { - log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) - log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) - log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version)) -} - -func main() { - // Add the zap logger flag set to the CLI. The flag set must - // be added before calling pflag.Parse(). - pflag.CommandLine.AddFlagSet(zap.FlagSet()) - - // Add flags registered by imported packages (e.g. glog and - // controller-runtime) - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - - pflag.Parse() - - // Use a zap logr.Logger implementation. If none of the zap - // flags are configured (or if the zap flag set is not being - // used), this defaults to a production zap logger. - // - // The logger instantiated here can be changed to any logger - // implementing the logr.Logger interface. This logger will - // be propagated through the whole operator, generating - // uniform and structured logs. 
- logf.SetLogger(zap.Logger()) - - printVersion() - - namespace, err := k8sutil.GetWatchNamespace() - if err != nil { - log.Error(err, "Failed to get watch namespace") - os.Exit(1) - } - - // Get a config to talk to the apiserver - cfg, err := config.GetConfig() - if err != nil { - log.Error(err, "") - os.Exit(1) - } - - ctx := context.TODO() - // Become the leader before proceeding - err = leader.Become(ctx, "bpa-operator-lock") - if err != nil { - log.Error(err, "") - os.Exit(1) - } - - // Create a new Cmd to provide shared dependencies and start components - mgr, err := manager.New(cfg, manager.Options{ - Namespace: namespace, - MapperProvider: restmapper.NewDynamicRESTMapper, - MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), - }) - if err != nil { - log.Error(err, "") - os.Exit(1) - } - - log.Info("Registering Components.") - - // Setup Scheme for all resources - if err := apis.AddToScheme(mgr.GetScheme()); err != nil { - log.Error(err, "") - os.Exit(1) - } - - // Setup all Controllers - if err := controller.AddToManager(mgr); err != nil { - log.Error(err, "") - os.Exit(1) - } - - if err = serveCRMetrics(cfg); err != nil { - log.Info("Could not generate and serve custom resource metrics", "error", err.Error()) - } - - // Add to the below struct any other metrics ports you want to expose. - servicePorts := []v1.ServicePort{ - {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, - {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}}, - } - // Create Service object to expose the metrics port(s). - _, err = metrics.CreateMetricsService(ctx, cfg, servicePorts) - if err != nil { - log.Info(err.Error()) - } - - log.Info("Starting the Cmd.") - - // Start the Cmd - if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - log.Error(err, "Manager exited non-zero") - os.Exit(1) - } -} - -// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types. -// It serves those metrics on "http://metricsHost:operatorMetricsPort". -func serveCRMetrics(cfg *rest.Config) error { - // Below function returns filtered operator/CustomResource specific GVKs. - // For more control override the below GVK list with your own custom logic. - filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme) - if err != nil { - return err - } - // Get the namespace the operator is currently deployed in. - operatorNs, err := k8sutil.GetOperatorNamespace() - if err != nil { - return err - } - // To generate metrics in other namespaces, add the values below. - ns := []string{operatorNs} - // Generate and serve custom resource specific metrics. 
- err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort) - if err != nil { - return err - } - return nil -} diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr.yaml deleted file mode 100644 index 0c62cbc..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-sample-2 - labels: - cluster: cluster-xyz - owner: c1 -spec: - masters: - - master-1: - mac-address: 00:c6:14:04:61:b2 - - master-2: - mac-address: 00:c5:12:06:61:b2 - workers: - - worker-1: - mac-address: 00:c4:13:04:62:b5 diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_2.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_2.yaml deleted file mode 100644 index 179208e..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_2.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-sample-two - labels: - cluster: cluster-abc - owner: c1 -spec: - masters: - - master-1: - mac-address: a4:bf:0f:63:85:66 - workers: - - worker-1: - mac-address: b5:bf:0f:63:85:61 - - worker-2: - mac-address: 34:f2:fd:9c:87:62 diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_multiple_3.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_multiple_3.yaml deleted file mode 100644 index 3a34dba..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_multiple_3.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-test-2 - labels: - cluster: cluster-yyy - owner: c1 -spec: - masters: - - master-1: - mac-address: 00:c6:14:04:61:b2 - - master-2: - mac-address: 00:c5:12:06:61:b2 - workers: - - worker-1: - mac-address: 00:c4:13:04:62:b5 - - worker-2: - mac-address: 00:c3:11:02:61:b5 diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_multiple_1.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_multiple_1.yaml deleted file mode 100644 index 2c3c906..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_multiple_1.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-test-1 - labels: - cluster: cluster-abc - owner: c1 -spec: - masters: - - master-1: - mac-address: 00:c6:14:04:61:b2 - workers: - - worker-1: - mac-address: 00:c4:13:04:62:b5 diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_multiple_2.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_multiple_2.yaml deleted file mode 100644 index a9c5b5c..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_multiple_2.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-test-2 - labels: - cluster: cluster-xyz - owner: c1 -spec: - masters: - - master-1: - mac-address: 00:c6:14:04:61:b2 - - master-2: - mac-address: 00:c5:12:06:61:b2 - workers: - - worker-1: - mac-address: 
00:c4:13:04:62:b5 - - worker-2: - mac-address: 00:c3:11:02:61:b5 diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single.yaml deleted file mode 100644 index cfff1c5..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-sample-real - labels: - cluster: cluster-xyz - owner: c1 -spec: - masters: - - master-1: - mac-address: 90:e2:ba:cd:df:bd diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single_2.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single_2.yaml deleted file mode 100644 index 6e973e6..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single_2.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-sample-real-2 - labels: - cluster: cluster-xyz - owner: c1 -spec: - masters: - - master-1: - mac-address: 90:e2:ba:cd:df:bd - workers: - - master-1: - mac-address: 90:e2:ba:cd:df:bd diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single_3.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single_3.yaml deleted file mode 100644 index 9209398..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_sample_single_3.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-sample-real-3 - labels: - cluster: cluster-xyz - owner: c1 -spec: - masters: - - master-1: - mac-address: 90:e2:ba:cd:df:bd - workers: - - master-1: #colon must be put even if MAC address is not specified diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_multiple.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_multiple.yaml deleted file mode 100644 index be026f8..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_multiple.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-test-vm-2 - labels: - cluster: vm-cluster-2 - cluster-type: virtlet-vm - owner: c1 -spec: - masters: - - master-1: - mac-address: 00:c6:14:04:61:b2 - workers: - - worker-1: - mac-address: 00:c4:13:04:62:b5 diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_single.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_single.yaml deleted file mode 100644 index a780297..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_single.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-test-vm-1 - labels: - cluster: vm-cluster-1 - cluster-type: virtlet-vm - owner: c1 -spec: - masters: - - master-1: - mac-address: c2:b4:57:49:47:f1 diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_crd.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_crd.yaml deleted file mode 100644 index 778b194..0000000 --- 
a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_crd.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: provisionings.bpa.akraino.org -spec: - group: bpa.akraino.org - names: - kind: Provisioning - listKind: ProvisioningList - plural: provisionings - singular: provisioning - shortNames: - - bpa - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - type: object - status: - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/provisioning_cr_kud_plugins_sample.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/provisioning_cr_kud_plugins_sample.yaml deleted file mode 100644 index 852f521..0000000 --- a/cmd/bpa-operator/deploy/crds/provisioning-crd/provisioning_cr_kud_plugins_sample.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: sample-kud-plugins - labels: - cluster: cluster-efg - owner: c2 -spec: - masters: - - master-1: - mac-address: 00:e1:ba:ce:df:bd - KUDPlugins: - - emco diff --git a/cmd/bpa-operator/deploy/crds/software-crd/bpa_v1alpha1_software_cr.yaml b/cmd/bpa-operator/deploy/crds/software-crd/bpa_v1alpha1_software_cr.yaml deleted file mode 100644 index f5f35ba..0000000 --- a/cmd/bpa-operator/deploy/crds/software-crd/bpa_v1alpha1_software_cr.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Software -metadata: - labels: - cluster: cluster-xyz - owner: c1 - name: sample-software -spec: - masterSoftware: - - expect - - htop - - jq: - version: 1.5+dfsg-1ubuntu0.1 - - maven - workerSoftware: - - curl - - python - - tmux - - jq diff --git a/cmd/bpa-operator/deploy/crds/software-crd/bpa_v1alpha1_software_crd.yaml b/cmd/bpa-operator/deploy/crds/software-crd/bpa_v1alpha1_software_crd.yaml deleted file mode 100644 index 3845e2d..0000000 --- a/cmd/bpa-operator/deploy/crds/software-crd/bpa_v1alpha1_software_crd.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: softwares.bpa.akraino.org -spec: - group: bpa.akraino.org - names: - kind: Software - listKind: SoftwareList - plural: softwares - singular: software - shortNames: - - sw - scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - type: object - status: - type: object - version: v1alpha1 - versions: - - name: v1alpha1 - served: true - storage: true diff --git a/cmd/bpa-operator/deploy/crds/software-crd/test_bpa_v1alpha1_software_cr.yaml b/cmd/bpa-operator/deploy/crds/software-crd/test_bpa_v1alpha1_software_cr.yaml deleted file mode 100644 index c64615b..0000000 --- a/cmd/bpa-operator/deploy/crds/software-crd/test_bpa_v1alpha1_software_cr.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Software -metadata: - labels: - cluster: cluster-abc - owner: c2 - name: test-software -spec: - masterSoftware: - - nautilus - - htop - - jq: - version: 1.5+dfsg-1ubuntu0.1 - - maven - workerSoftware: - - curl - - htop - - tmux - - jq diff --git a/cmd/bpa-operator/deploy/kud-installer.yaml b/cmd/bpa-operator/deploy/kud-installer.yaml deleted file mode 100644 index cbeafa4..0000000 --- a/cmd/bpa-operator/deploy/kud-installer.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# Copyright 2021 Intel Corporation, Inc -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -apiVersion: v1 -kind: ConfigMap -metadata: - name: kud-installer -data: - #Choose either docker or containerd for the CONTAINER_RUNTIME - CONTAINER_RUNTIME: "docker" - KUD_DEBUG: "" - KUD_ENABLE_TESTS: "true" - #Kata webhook will only be enabled if containerd is used - ENABLE_KATA_WEBHOOK: "false" - #Chose either "kata-qemu" or "kata-clh" for the Kata webhook runtimeclass - KATA_WEBHOOK_RUNTIMECLASS: "kata-clh" diff --git a/cmd/bpa-operator/deploy/netattachdef-flannel-vm.yaml b/cmd/bpa-operator/deploy/netattachdef-flannel-vm.yaml deleted file mode 100644 index b2ff402..0000000 --- a/cmd/bpa-operator/deploy/netattachdef-flannel-vm.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: "k8s.cni.cncf.io/v1" -kind: NetworkAttachmentDefinition -metadata: - name: flannel-vm -spec: - config: '{ - "cniVersion": "0.3.1", - "name" : "cni0", - "plugins": [ { - "type": "flannel", - "cniVersion": "0.3.1", - "masterplugin": true, - "delegate": { - "isDefaultGateway": true - } - }, - { - "type": "tuning" - }] - }' diff --git a/cmd/bpa-operator/deploy/operator.yaml b/cmd/bpa-operator/deploy/operator.yaml deleted file mode 100644 index 2170148..0000000 --- a/cmd/bpa-operator/deploy/operator.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: bpa-operator -spec: - replicas: 1 - selector: - matchLabels: - name: bpa-operator - template: - metadata: - labels: - name: bpa-operator - spec: - hostNetwork: true - serviceAccountName: bpa-operator - containers: - - name: bpa-operator - # Replace this with the built image name - image: akraino.org/icn/bpa-operator:latest - imagePullPolicy: IfNotPresent - volumeMounts: - - name: icn-cluster - mountPath: /multi-cluster - command: - - /bpa-operator - securityContext: - privileged: true - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: "bpa-operator" - volumes: - - name: icn-cluster - hostPath: - path: /opt/kud/multi-cluster diff --git a/cmd/bpa-operator/deploy/role.yaml b/cmd/bpa-operator/deploy/role.yaml deleted file mode 100644 index c7b6c95..0000000 --- a/cmd/bpa-operator/deploy/role.yaml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: bpa-operator -rules: -- apiGroups: - - "" - resources: - - pods - - services - - endpoints - - persistentvolumeclaims - - events - - configmaps - - secrets - - namespaces - - jobs - verbs: - - '*' -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - '*' -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - get - - create -- apiGroups: - - apps - resourceNames: - - bpa-operator - resources: - - deployments/finalizers - verbs: - - update -- apiGroups: - - "" - resources: - - pods - verbs: - - get -- apiGroups: - - apps - resources: - - replicasets - verbs: - - get -- apiGroups: - - bpa.akraino.org - resources: - - '*' - verbs: - - '*' -- apiGroups: - - metal3.io - resources: - - baremetalhosts - verbs: - - '*' -- apiGroups: - - "batch" - resources: - - jobs - verbs: - - '*' diff --git a/cmd/bpa-operator/deploy/role_binding.yaml b/cmd/bpa-operator/deploy/role_binding.yaml deleted file mode 100644 index e837cdb..0000000 --- a/cmd/bpa-operator/deploy/role_binding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - 
name: bpa-operator -subjects: -- kind: ServiceAccount - name: bpa-operator - namespace: default -roleRef: - kind: ClusterRole - name: bpa-operator - apiGroup: rbac.authorization.k8s.io diff --git a/cmd/bpa-operator/deploy/service_account.yaml b/cmd/bpa-operator/deploy/service_account.yaml deleted file mode 100644 index 6157f10..0000000 --- a/cmd/bpa-operator/deploy/service_account.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: bpa-operator diff --git a/cmd/bpa-operator/deploy/virtlet-deployment-sample.yaml b/cmd/bpa-operator/deploy/virtlet-deployment-sample.yaml deleted file mode 100644 index 6a5b925..0000000 --- a/cmd/bpa-operator/deploy/virtlet-deployment-sample.yaml +++ /dev/null @@ -1,74 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: virtlet-deployment - labels: - app: virtlet -spec: - replicas: 1 - selector: - matchLabels: - app: virtlet - template: - metadata: - labels: - app: virtlet - annotations: - VirtletLibvirtCPUSetting: | - mode: host-passthrough - # This tells CRI Proxy that this pod belongs to Virtlet runtime - kubernetes.io/target-runtime: virtlet.cloud - VirtletCloudInitUserData: | - ssh_pwauth: True - disable_root: false - chpasswd: {expire: False} - manage_resolv_conf: True - resolv_conf: - nameservers: ['8.8.8.8', '8.8.4.4'] - users: - - name: root - gecos: User - primary-group: root - groups: users - lock_passwd: false - shell: /bin/bash - sudo: ALL=(ALL) NOPASSWD:ALL - ssh_authorized_keys: - $ssh_key - runcmd: - - sed -i -e 's/^#DNS=.*/DNS=8.8.8.8/g' /etc/systemd/resolved.conf - - systemctl daemon-reload - - systemctl restart systemd-resolved - v1.multus-cni.io/default-network: '[ - { "name": "flannel-vm", - "mac": "c2:b4:57:49:47:f1" }]' - VirtletRootVolumeSize: 8Gi - VirtletVCPUCount: "2" - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: extraRuntime - operator: In - values: - - virtlet - containers: - - name: virtlet-deployment - # This specifies the image to use. - # virtlet.cloud/ prefix is used by CRI proxy, the remaining part - # of the image name is prepended with https:// and used to download the image - image: virtlet.cloud/ubuntu/18.04 - imagePullPolicy: IfNotPresent - # tty and stdin required for "kubectl attach -t" to work - tty: true - stdin: true - resources: - requests: - cpu: 2 - memory: 8Gi - limits: - # This memory limit is applied to the libvirt domain definition - cpu: 2 - memory: 8Gi diff --git a/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh b/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh deleted file mode 100755 index 433c26f..0000000 --- a/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env bash -set -eu -o pipefail - -SCRIPTDIR="$(readlink -f $(dirname ${BASH_SOURCE[0]}))" -LIBDIR="$(dirname $(dirname $(dirname ${SCRIPTDIR})))/env/lib" - -source $LIBDIR/common.sh - -CLUSTER_NAME=test-bmh-cluster -ADDONS_NAMESPACE=kud - -function emco_ready { - KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n emco wait pod --all --for=condition=Ready --timeout=0s 1>/dev/null 2>/dev/null -} - -function emcoctl_apply { - [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml | - awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. 
]] -} - -function emcoctl_delete { - [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml | - awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]] -} - -function emcoctl_instantiate { - [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply projects/kud/composite-apps/$@/v1/deployment-intent-groups/deployment/instantiate | - awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]] -} - -function emcoctl_terminate { - [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply projects/kud/composite-apps/$@/v1/deployment-intent-groups/deployment/terminate | - awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]] -} - -function emcoctl { - local -r op=$1 - shift - - local -r interval=2 - for ((try=0;try<600;try+=${interval})); do - if emco_ready; then break; fi - echo "$(date +%H:%M:%S) - Waiting for emco" - sleep ${interval}s - done - - for ((;try<600;try+=${interval})); do - case ${op} in - "apply") if emcoctl_apply $@; then return 0; fi ;; - "delete") if emcoctl_delete $@; then return 0; fi ;; - "instantiate") if emcoctl_instantiate $@; then return 0; fi ;; - "terminate") if emcoctl_terminate $@; then return 0; fi ;; - esac - echo "$(date +%H:%M:%S) - Waiting for emcoctl ${op} $@" - sleep ${interval}s - done - - return 1 -} - -function addons_instantiated { - KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 1>/dev/null 2>/dev/null -} - -function addons_terminated { - [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get pod -l app.kubernetes.io/instance=r1 --no-headers 2>/dev/null | wc -l) == 0 ]] -} - -function networks_instantiated { - local -r count=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get sriovnetworknodestate --no-headers 2>/dev/null | wc -l) - local -r succeeded=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get sriovnetworknodestate -o jsonpath='{range .items[*]}{.status.syncStatus}{"\n"}{end}' 2>/dev/null | grep "Succeeded" | wc -l) - [[ $count == $succeeded ]] -} - -function networks_terminated { - # The syncStatus will be the same whether we are instantiating or terminating an SR-IOV network - networks_instantiated -} - -function kubevirt_instantiated { - [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get kubevirt -o jsonpath='{range .items[*]}{.status.phase}{"\n"}{end}' 2>/dev/null | grep "Deployed" | wc -l) == 1 ]] - [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get cdi -o jsonpath='{range .items[*]}{.status.phase}{"\n"}{end}' 2>/dev/null | grep "Deployed" | wc -l) == 1 ]] -} - -function kubevirt_terminated { - [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get kubevirt --no-headers 2>/dev/null | wc -l) == 0 ]] - [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get cdi --no-headers 2>/dev/null | wc -l) == 0 ]] -} - -if [[ $1 == "provision" ]]; then - kubectl create -f e2etest/test_bmh_provisioning_cr.yaml - sleep 5 - - #Check Status of kud job pod - status="Running" - - while [[ $status == "Running" ]] - do - echo "KUD install job still running" - sleep 2m - stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME}) - status=$(echo $stats | cut -d " " -f 3) - done - - #Print logs of Job Pod - jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME}) - podName=$(echo $jobPod | cut -d " " -f 1) - printf "\nNow Printing Job pod logs\n" - kubectl logs $podName - - if 
[[ $status == "Completed" ]]; - then - printf "KUD Install Job completed\n" - printf "Checking cluster status\n" - - source ../../env/lib/common.sh - CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf - APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}') - TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode) - if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure; - then - printf "\nKubernetes Cluster Install did not complete successfully\n" - exit 1 - else - printf "\nKubernetes Cluster Install was successful\n" - fi - - else - printf "KUD Install Job failed\n" - exit 1 - fi - - #Apply addons - printf "Applying KUD addons\n" - pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons - emcoctl apply 00-controllers.yaml - emcoctl apply 01-cluster.yaml - emcoctl apply 02-project.yaml - emcoctl apply 03-addons-app.yaml - popd - - #Instantiate addons - emcoctl instantiate addons - wait_for addons_instantiated - emcoctl instantiate networks - wait_for networks_instantiated - emcoctl instantiate kubevirt - wait_for kubevirt_instantiated - - #Test addons - printf "Testing KUD addons\n" - pushd /opt/kud/multi-cluster/addons/tests - failed_kud_tests="" - container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}') - if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then - #With containerd 1.2.13, the qat test container image fails to unpack. - kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk" - else - kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk" - fi - for test in ${kud_tests}; do - KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}" - done - KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2" - if [[ ! 
-z "$failed_kud_tests" ]]; then - printf "Test cases failed:${failed_kud_tests}\n" - exit 1 - fi - popd - printf "All test cases passed\n" -elif [[ $1 == "teardown" ]]; then - CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf - #Tear down setup - printf "\n\nBeginning BMH E2E Test Teardown\n\n" - emcoctl terminate kubevirt - wait_for kubevirt_terminated - emcoctl terminate networks - wait_for networks_terminated - emcoctl terminate addons - wait_for addons_terminated - pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons - emcoctl delete 03-addons-app.yaml - emcoctl delete 02-project.yaml - emcoctl delete 01-cluster.yaml - emcoctl delete 00-controllers.yaml - popd - kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml - kubectl delete job kud-${CLUSTER_NAME} - kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap - rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME} - rm -rf /opt/kud/multi-cluster/addons - make delete -fi diff --git a/cmd/bpa-operator/e2etest/bpa_remote_compute_cr_sample.yaml b/cmd/bpa-operator/e2etest/bpa_remote_compute_cr_sample.yaml deleted file mode 100644 index a2c77e0..0000000 --- a/cmd/bpa-operator/e2etest/bpa_remote_compute_cr_sample.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: bpa-remote - labels: - cluster: bpa-remote - owner: c1 -spec: - masters: - - remote-compute: - mac-address: 08:00:27:ef:ab:60 - KUDPlugins: - - bpa - PodSubnet: 10.244.64.0/18 diff --git a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr.yaml b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr.yaml deleted file mode 100644 index 0788a06..0000000 --- a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: remote-vvm110 - labels: - cluster: remote-vvm110 - cluster-type: virtlet-vm - owner: c1 -spec: - masters: - - master-1: - mac-address: c2:b4:57:49:47:f1 - PodSubnet: 172.21.64.0/18 diff --git a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr_multi.yaml b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr_multi.yaml deleted file mode 100644 index 6523053..0000000 --- a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr_multi.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: remote-vvm115 - labels: - cluster: remote-vvm115 - cluster-type: virtlet-vm - owner: c1 -spec: - masters: - - master-1: - mac-address: c2:b4:57:49:47:f3 - - master-2: - mac-address: c2:b4:57:49:47:f4 - - master-3: - mac-address: c2:b4:57:49:47:f5 - workers: - - worker-1: - mac-address: c2:b4:57:49:47:f6 - - worker-2: - mac-address: c2:b4:57:49:47:f7 - PodSubnet: 172.21.64.0/18 diff --git a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_verifier.sh b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_verifier.sh deleted file mode 100755 index 6f0fa1f..0000000 --- a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_verifier.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash -set -eu -o pipefail - -printf "\n\nStart Remote Virtlet VM provisioning E2E test\n\n" - -# remote compute provisioned and kube config available -source ~/ICN/latest/icn/env/lib/common.sh -CLUSTER_NAME=bpa-remote -KUBECONFIG=--kubeconfig=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf -APISERVER=$(kubectl ${KUBECONFIG} config view --minify -o jsonpath='{.clusters[0].cluster.server}') -TOKEN=$(kubectl ${KUBECONFIG} get secret $(kubectl ${KUBECONFIG} get 
serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode ) -if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure; -then - printf "\nRemote Kubernetes Cluster Install did not complete successfully\n" -else - printf "\niRemote Kubernetes Cluster Install was successful\n" -fi - -# create virtlet VM in remote compute -printf "Create remote Virtlet VM ...\n" -key=$(cat /opt/kud/multi-cluster/.ssh/id_rsa.pub) -cp ../deploy/virtlet-deployment-sample.yaml bpa_remote_virtletvm.yaml -sed -i "s|\$ssh_key|${key}|" bpa_remote_virtletvm.yaml -kubectl ${KUBECONFIG} create -f bpa_remote_virtletvm.yaml - -status="" -while [[ $status != "Running" ]] -do - stats=$(kubectl ${KUBECONFIG} get pods |grep -i virtlet-deployment) - status=$(echo $stats | cut -d " " -f 3) - if [[ $status == "Err"* ]]; then - echo "Error creating remote Virtlet VM, test incomplete" - kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm.yaml - exit 1 - fi -done - -echo "Remote Virtlet VM is ready for provisioning" - -printf "\nkubectl ${KUBECONFIG} get pods $(kubectl ${KUBECONFIG} get pods |grep -i virtlet-deployment | awk '{print $1}') -o json\n" -podjson=$(kubectl ${KUBECONFIG} get pods $(kubectl ${KUBECONFIG} get pods |grep -i virtlet-deployment | awk '{print $1}') -o json) -printf "\n$podjson\n\n" - -printf "Provision remote Virtlet VM ...\n" -kubectl ${KUBECONFIG} apply -f bpa_remote_virtletvm_cr.yaml - -#Check Status of remote kud job pod -status="Running" - -while [[ $status == "Running" ]] -do - echo "KUD install job still running" - sleep 2m - stats=$(kubectl ${KUBECONFIG} get pods |grep -i kud-) - status=$(echo $stats | cut -d " " -f 3) -done - -if [[ $status == "Completed" ]]; -then - printf "KUD Install Job completed\n" - printf "Checking cluster status\n" -else - printf "KUD Install Job failed\n" -fi - -#Print logs of Job Pod -jobPod=$(kubectl ${KUBECONFIG} get pods|grep kud-) -podName=$(echo $jobPod | cut -d " " -f 1) -printf "\nNow Printing Job pod logs\n" -kubectl ${KUBECONFIG} logs $podName - -#printf "\n\nBeginning E2E Remote VM Test Teardown\n\n" - -kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm_cr.yaml -kubectl ${KUBECONFIG} delete job kud-remotevvm -kubectl ${KUBECONFIG} delete --ignore-not-found=true configmap remotevvm-configmap -kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm.yaml diff --git a/cmd/bpa-operator/e2etest/e2e_virtletvm_test_provisioning_cr.yaml b/cmd/bpa-operator/e2etest/e2e_virtletvm_test_provisioning_cr.yaml deleted file mode 100644 index 4868b72..0000000 --- a/cmd/bpa-operator/e2etest/e2e_virtletvm_test_provisioning_cr.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: test-vm - labels: - cluster: cluster-vm - cluster-type: virtlet-vm - owner: c1 -spec: - masters: - - master-1: - mac-address: c2:b4:57:49:47:f1 diff --git a/cmd/bpa-operator/e2etest/test_bmh_provisioning_cr.yaml b/cmd/bpa-operator/e2etest/test_bmh_provisioning_cr.yaml deleted file mode 100644 index aca3766..0000000 --- a/cmd/bpa-operator/e2etest/test_bmh_provisioning_cr.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: bpa.akraino.org/v1alpha1 -kind: Provisioning -metadata: - name: provisioning-test-bmh - labels: - cluster: test-bmh-cluster - owner: tester -spec: - masters: - - pod11-node3: - mac-address: 00:1e:67:f1:5b:91 - workers: - - pod11-node2: - mac-address: 00:1e:67:fe:f4:1a - KUDPlugins: - - emco diff --git a/cmd/bpa-operator/go.mod b/cmd/bpa-operator/go.mod deleted file mode 
100644 index cbefaf9..0000000 --- a/cmd/bpa-operator/go.mod +++ /dev/null @@ -1,45 +0,0 @@ -module github.com/bpa-operator - -require ( - github.com/NYTimes/gziphandler v1.0.1 // indirect - github.com/operator-framework/operator-sdk v0.15.2 - github.com/spf13/pflag v1.0.5 - golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152 - gopkg.in/ini.v1 v1.46.0 - k8s.io/api v0.0.0 - k8s.io/apimachinery v0.0.0 - k8s.io/client-go v12.0.0+incompatible - sigs.k8s.io/controller-runtime v0.4.0 - sigs.k8s.io/controller-tools v0.2.4 -) - -// Pinned to kubernetes-1.16.2 -replace ( - k8s.io/api => k8s.io/api v0.0.0-20191016110408-35e52d86657a - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20191016113550-5357c4baaf65 - k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 - k8s.io/apiserver => k8s.io/apiserver v0.0.0-20191016112112-5190913f932d - k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20191016114015-74ad18325ed5 - k8s.io/client-go => k8s.io/client-go v0.0.0-20191016111102-bec269661e48 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.0.0-20191016115326-20453efc2458 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20191016115129-c07a134afb42 - k8s.io/code-generator => k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 - k8s.io/component-base => k8s.io/component-base v0.0.0-20191016111319-039242c015a9 - k8s.io/cri-api => k8s.io/cri-api v0.0.0-20190828162817-608eb1dad4ac - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.0.0-20191016115521-756ffa5af0bd - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20191016112429-9587704a8ad4 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.0.0-20191016114939-2b2b218dc1df - k8s.io/kube-proxy => k8s.io/kube-proxy v0.0.0-20191016114407-2e83b6f20229 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.0.0-20191016114748-65049c67a58b - k8s.io/kubectl => k8s.io/kubectl v0.0.0-20191016120415-2ed914427d51 - k8s.io/kubelet => k8s.io/kubelet v0.0.0-20191016114556-7841ed97f1b2 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.0.0-20191016115753-cf0698c3a16b - k8s.io/metrics => k8s.io/metrics v0.0.0-20191016113814-3b1a734dba6e - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.0.0-20191016112829-06bb3c9d77c9 -) - -replace github.com/operator-framework/operator-sdk => github.com/operator-framework/operator-sdk v0.15.2 - -replace github.com/docker/docker => github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 // Required by Helm - -go 1.13 diff --git a/cmd/bpa-operator/go.sum b/cmd/bpa-operator/go.sum deleted file mode 100644 index 0147846..0000000 --- a/cmd/bpa-operator/go.sum +++ /dev/null @@ -1,1237 +0,0 @@ -bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= -bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c/go.mod h1:1vhO7Mn/FZMgOgDVGLy5X1mE6rq1HbkBdkF/yj8zkcg= -bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg= -cloud.google.com/go v0.0.0-20160913182117-3b1ae45394a2/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.2 h1:4y4L7BdHenTfZL0HervofNTHh9Ad6mNX72cQvl+5eH0= -cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= 
-cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= -cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -contrib.go.opencensus.io/exporter/ocagent v0.4.9/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw= -contrib.go.opencensus.io/exporter/ocagent v0.4.11 h1:Zwy9skaqR2igcEfSVYDuAsbpa33N0RPtnYTHEe2whPI= -contrib.go.opencensus.io/exporter/ocagent v0.4.11/go.mod h1:7ihiYRbdcVfW4m4wlXi9WRPdv79C0fStcjNlyE6ek9s= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v11.1.0+incompatible h1:9DfMsQdUMEtg1jKRTjtkNZsvOuZXJOMl4dN1kiQwAc8= -github.com/Azure/go-autorest v11.1.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= -github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= -github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils 
v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.0.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v0.0.0-20190301161902-9f8fceff796f/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig/v3 v3.0.0/go.mod h1:NEUY/Qq8Gdm2xgYA+NwJM6wmfdRV9xkh8h/Rld20R0U= -github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2/go.mod h1:X0noFIik9YqfhGYBLEHg8LJKEwy7QIitLQuFMpKLcPk= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= 
-github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk= -github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e/go.mod h1:uHBSeeATKpVazAACZBDPL/Nk/UhQDDsJWDlqYJo8/Us= -github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/brancz/gojsontoyaml v0.0.0-20190425155809-e8bd32d46b3d/go.mod h1:IyUJYN1gvWjtLF5ZuygmxbnsAyP3aJS6cHzIuZY50B0= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bugsnag/bugsnag-go v1.5.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= -github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= -github.com/census-instrumentation/opencensus-proto 
v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= -github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= -github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= -github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190823190603-4a2f61c4f2b4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= -github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.9+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd 
v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/prometheus-operator v0.29.0 h1:Moi4klbr1xUVaofWzlaM12mxwCL294GiLW2Qj8ku0sY= -github.com/coreos/prometheus-operator v0.29.0/go.mod h1:SO+r5yZUacDFPKHfPoUjI3hMsH+ZUdiuNNhuSq3WoSg= -github.com/coreos/prometheus-operator v0.34.0 h1:TF9qaydNeUamLKs0hniaapa4FBz8U8TIlRRtJX987A4= -github.com/coreos/prometheus-operator v0.34.0/go.mod h1:Li6rMllG/hYIyXfMuvUwhyC+hqwJVHdsDdP21hypT1M= -github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= -github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= -github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= -github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= -github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A= -github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= -github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE= -github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= -github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= -github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/deislabs/oras v0.7.0/go.mod h1:sqMKPG3tMyIX9xwXUBRLhZ24o+uT4y6jgBD2RzUTKDM= -github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20190506213505-d88565df0c2d/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.6.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.0.0-20180612054059-a9fbbdc8dd87/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= -github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= 
-github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.8.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.8.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.3+incompatible h1:2OwhVdhtzYUp5P5wuGsVDPagKSRd9JK72sJCHVCXh5g= -github.com/emicklei/go-restful v2.9.3+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.11.1+incompatible h1:CjKsv3uWcCMvySPQYKxO8XX3f9zD4FeZRsW4G0B4ffE= -github.com/emicklei/go-restful v2.11.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ= -github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v3.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc= -github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= -github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= -github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE= -github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8= -github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference 
v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk= -github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.17.2/go.mod h1:QO936ZXeisByFmZEO1IS1Dqhtf4QV1sYYFtIq6Ld86Q= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.0 h1:A4SZ6IWh3lnjH0rG0Z5lkxazMGBECtrZcbyYQi+64k4= -github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo= -github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.0 h1:Kg7Wl7LkTPlmc393QZQ/5rQadPhi7pBVEMZxyTi0Ii8= -github.com/go-openapi/swag v0.19.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 
h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/validate v0.17.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.6.15 h1:OsV5vOpHYUpP7ZLS6sem1y40/lNX1BZj+ynMiRi21lQ= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/logger v1.0.0/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= -github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= -github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= -github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0= -github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= -github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp 
v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= -github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gophercloud/gophercloud v0.0.0-20180330165814-781450b3c4fc/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= -github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= -github.com/gophercloud/gophercloud v0.0.0-20190318015731-ff9851476e98/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.0.0-20190408160324-6c7ac67f8855 h1:3dfUujjROkkXcwIpsh9z6bjOhPFooLpxejc7qgX13/g= -github.com/gophercloud/gophercloud v0.0.0-20190408160324-6c7ac67f8855/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.2.0 h1:lD2Bce2xBAMNNcFZ0dObTpXkGLlVIb33RPVUNVpw6ic= -github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gosuri/uitable v0.0.1/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= -github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190203031600-7a902570cb17/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= -github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20170330212424-2500245aa611/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.6.3/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-health-probe v0.2.0/go.mod h1:4GVx/bTCtZaSzhjbGueDY5YgBdsmKeVx+LErv/n0L6s= -github.com/grpc-ecosystem/grpc-health-probe v0.2.1-0.20181220223928-2bf0a5b182db/go.mod h1:uBKkC2RbarFsvS5jMJHpVhTLvGlGQj9JJwkaePE3FWI= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0/go.mod 
h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= -github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/heketi/heketi v9.0.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= -github.com/heketi/rest v0.0.0-20180404230133-aa6a65207413/go.mod h1:BeS3M108VzVlmAue3lv2WcGuPAX94/KN63MUURzbYSI= -github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= -github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64/go.mod h1:RYlF4ghFZPPmk2TC5REt5OFwvfb6lzxFWrTWB+qs28s= -github.com/helm/helm-2to3 v0.2.0/go.mod h1:jQUVAWB0bM7zNIqKPIfHFzuFSK0kHYovJrjO+hqcvRk= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= -github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/improbable-eng/thanos v0.3.2/go.mod h1:GZewVGILKuJVPNRn7L4Zw+7X96qzFOwj63b22xYGXBE= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod 
h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/jonboulle/clockwork v0.0.0-20141017032234-72f9bd7c4e0c/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jsonnet-bundler/jsonnet-bundler v0.1.0/go.mod h1:YKsSFc9VFhhLITkJS3X2PrRqWG9u2Jq99udTdDjQLfM= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/lib/pq v1.0.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190403194419-1ea4449da983 h1:wL11wNW7dhKIcRCHSm4sHKPWz0tt4mwBsVodG7+Xyqg= -github.com/mailru/easyjson v0.0.0-20190403194419-1ea4449da983/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/maorfr/helm-plugin-utils v0.0.0-20181205064038-588190cb5e3b/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA= -github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= -github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/martinlindhe/base36 v0.0.0-20180729042928-5cda0030da17/go.mod h1:+AtEs8xrBpCeYgSLoY/aJ6Wf37jtBuR0s35750M27+8= -github.com/martinlindhe/base36 v1.0.0/go.mod h1:+AtEs8xrBpCeYgSLoY/aJ6Wf37jtBuR0s35750M27+8= -github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= 
-github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/maxbrunsfeld/counterfeiter v0.0.0-20181017030959-1aadac120687/go.mod h1:aoVsckWnsNzazwF2kmD+bzgdr4GBlbK91zsdivQJ2eU= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.1/go.mod h1:F9YacGpnZbLQMzuPI0rR6op21YvNu/RjL705LJJpM3k= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= -github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= -github.com/metal3-io/baremetal-operator v0.0.0-20191015111357-fc67cc20e40b h1:I28+Qj1D5DwG4uVbxLGooVxNmofzKiN5+g4N09bwPsI= -github.com/metal3-io/baremetal-operator v0.0.0-20191015111357-fc67cc20e40b/go.mod h1:o9ta8R2EEtSiQY53sXdoM50v5531G0oS+lC58Gcm+1Y= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v0.0.0-20181005163659-0d29b283ac0f/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= -github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 
-github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20190414153302-2ae31c8b6b30/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2-0.20180831124310-ae19f1b56d53/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/openshift/api v3.9.1-0.20190924102528-32369d4db2ad+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= -github.com/openshift/client-go v0.0.0-20190923180330-3b6373338c9b/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= -github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod 
h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= -github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/operator-framework/api v0.0.0-20200120235816-80fd2f1a09c9/go.mod h1:S5IdlJvmKkF84K2tBvsrqJbI2FVy03P88R75snpRxJo= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20181023032605-e838f7fb2186/go.mod h1:Ma5ZXd4S1vmMyewWlF7aO8CZiokR7Sd8dhSfkGkNU4U= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190105193533-81104ffdc4fb/go.mod h1:XMyE4n2opUK4N6L45YGQkXXi8F9fD7XDYFv/CsS6V5I= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190128024246-5eb7ae5bdb7a/go.mod h1:vq6TTFvg6ti1Bn6ACsZneZTmjTsURgDD6tQtVDbEgsU= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20191115003340-16619cd27fa5/go.mod h1:zL34MNy92LPutBH5gQK+gGhtgTUlZZX03I2G12vWHF4= -github.com/operator-framework/operator-registry v1.0.1/go.mod h1:1xEdZjjUg2hPEd52LG3YQ0jtwiwEGdm98S1TH5P4RAA= -github.com/operator-framework/operator-registry v1.0.4/go.mod h1:hve6YwcjM2nGVlscLtNsp9sIIBkNZo6jlJgzWw7vP9s= -github.com/operator-framework/operator-registry v1.5.1/go.mod h1:agrQlkWOo1q8U1SAaLSS2WQ+Z9vswNT2M2HFib9iuLY= -github.com/operator-framework/operator-registry v1.5.3/go.mod h1:agrQlkWOo1q8U1SAaLSS2WQ+Z9vswNT2M2HFib9iuLY= -github.com/operator-framework/operator-registry v1.5.7-0.20200121213444-d8e2ec52c19a/go.mod h1:ekexcV4O8YMxdQuPb+Xco7MHfVmRIq7Jvj5e6NU7dHI= -github.com/operator-framework/operator-sdk v0.9.0 h1:moY3n5vsg4OpD3FzHvqI68Fv+gJFQ1GaDKMHm2NRpF8= -github.com/operator-framework/operator-sdk v0.9.0/go.mod h1:7eW7ldXmvenehIMVdO2zCdERf/828Mrftq4u7GS0I68= -github.com/operator-framework/operator-sdk v0.15.2 h1:VlxI0J+HLmMKs5k53drQ/B1AXhyNj0OaD2BbREt8Hnk= -github.com/operator-framework/operator-sdk v0.15.2/go.mod h1:RkC5LpluVONa08ORFIIVCYrEr855xG1/NltRL2jQ8qo= -github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v0.0.0-20190513014714-f5a3d24e5776/go.mod h1:3HNVkVOU7vZeFXocWuvtcS0XSFLcf2XUSDHkq9t1jU4= -github.com/otiai10/mint v1.2.3/go.mod h1:YnfyPNhBvnY8bW4SGQHCs/aAFhkgySlMZbrF5U0bOVw= -github.com/otiai10/mint v1.2.4/go.mod h1:d+b7n/0R3tdyUYYylALXpWQ/kTN+QobSq/4SRGBkR3M= -github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= -github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= -github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= 
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20171002181615-b8543db493a5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= -github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20190104105734-b1c43a6df3ae/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190104112138-b1a0a9a36d74/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190403104016-ea9eea638872 h1:0aNv3xC7DmQoy1/x1sMh18g+fihWW68LL13i8ao9kl4= -github.com/prometheus/procfs v0.0.0-20190403104016-ea9eea638872/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g= -github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.5.0/go.mod 
h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rubenv/sql-migrate v0.0.0-20191025130928-9355dd04f4b3/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= -github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/spec v1.0.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= -github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.0-20180319062004-c439c4fa0937/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= -github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/stevvooe/resumable v0.0.0-20180830230917-22b14a53ba50/go.mod h1:1pdIZTAHUz+HDKDVZ++5xg/duPlhKAIzw9qy42CWYp4= -github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/technosophos/moniker v0.0.0-20180509230615-a5dbd03a2245/go.mod h1:O1c8HleITsZqzNZDjSNzirUGsMT0oGu9LhHKoJrqO+A= -github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= -github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vmware/govmomi v0.20.1/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod 
h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xenolf/lego v0.0.0-20160613233155-a9d8cec0e656/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= -github.com/xenolf/lego v0.3.2-0.20160613233155-a9d8cec0e656/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.6/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= -go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= -go.opencensus.io v0.20.0 h1:L/ARO58pdktB6dLmYI0zAyW1XnavEmGziFd0MKfxnck= -go.opencensus.io v0.20.0/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= 
-golang.org/x/crypto v0.0.0-20180222182404-49796115aa4b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152 h1:ZC1Xn5A1nlpSmQCIva4bZ3ob3lmhYIefc+GU+DLg1Ow= -golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint 
v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= 
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 h1:N66aaryRB3Ax92gH0v3hp1QYZ3zWWCCUR/j8Ifh45Ss= -golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181105165119-ca4130e427c7/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181004145325-8469e314837c/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181023152157-44b849a8bc13/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191028164358-195ce5e7f934 h1:u/E0NqCIWRDAo9WCFo6Ko49njPFDLSd3z+X1HgWDMpE= -golang.org/x/sys v0.0.0-20191028164358-195ce5e7f934/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20170824195420-5d2fd3ccab98/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011152555-a398e557df60/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181207222222-4c874b978acb/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190213015956-f7e1b50d2251/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190408170212-12dd9f86f350 h1:0USRhKWpISljvJE8egltEaoJb+VD0IUA4eOH6W1yss8= -golang.org/x/tools v0.0.0-20190408170212-12dd9f86f350/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20191018212557-ed542cd5b28a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/gonum v0.0.0-20190710053202-4340aa3071a0/go.mod h1:03dgh78c4UvU1WksguQ/lvJQXbezKQGJSrwwRq5MraQ= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= -google.golang.org/api v0.3.0 h1:UIJY20OEo3+tK5MBlcdx37kmdH6EnRjGkW78mc6+EeA= -google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181016170114-94acd270e44e/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190128161407-8ac453e89fca/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod 
h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.46.0 h1:VeDZbLYGaupuvIrsYCEOe/L/2Pcs5n7hdO1ZTjporag= -gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.48.0 h1:URjZc+8ugRY5mL5uUeQH/a63JcHwdX9xZaWvmNWD7z8= -gopkg.in/ini.v1 v1.48.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= -gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v1 v1.1.2/go.mod h1:QpYS+a4WhS+DTlyQIi6Ka7MS3SuR9a055rgXNEe6EiA= -gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -helm.sh/helm/v3 v3.0.0/go.mod h1:sI7B9yfvMgxtTPMWdk1jSKJ2aa59UyP9qhPydqW6mgo= -helm.sh/helm/v3 v3.0.1/go.mod h1:sI7B9yfvMgxtTPMWdk1jSKJ2aa59UyP9qhPydqW6mgo= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE= -k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= -k8s.io/api v0.0.0-20190918195907-bd6ac527cfd2/go.mod h1:AOxZTnaXR/xiarlQL0JUfwQPxjmKDvVYoRp58cA7lUo= -k8s.io/api v0.0.0-20191016110408-35e52d86657a h1:VVUE9xTCXP6KUPMf92cQmN88orz600ebexcRRaBTepQ= -k8s.io/api v0.0.0-20191016110408-35e52d86657a/go.mod h1:/L5qH+AD540e7Cetbui1tuJeXdmNhO8jM6VkXeDdDhQ= -k8s.io/api v0.0.0-20191016225839-816a9b7df678 h1:z/0BV/tMBIvdwZvqBH/f7TWjQX9y3dj1nMNhrSK0h/8= -k8s.io/api v0.0.0-20191016225839-816a9b7df678/go.mod h1:LZQaT8MvVpl7Bg2lYFcQm7+Mpdxq8p1NFl3yh+5DCwY= -k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236 h1:JfFtjaElBIgYKCWEtYQkcNrTpW+lMO4GJy8NP6SVQmM= -k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= -k8s.io/apiextensions-apiserver v0.0.0-20190918201827-3de75813f604/go.mod h1:7H8sjDlWQu89yWB3FhZfsLyRCRLuoXoCoY5qtwW1q6I= -k8s.io/apiextensions-apiserver v0.0.0-20191016113550-5357c4baaf65/go.mod h1:5BINdGqggRXXKnDgpwoJ7PyQH8f+Ypp02fvVNcIFy9s= -k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 h1:UYfHH+KEF88OTg+GojQUwFTNxbxwmoktLwutUzR0GPg= -k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d/go.mod h1:3jediapYqJ2w1BFw7lAZPCx7scubsTfosqHkhXCWJKw= -k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 h1:Iieh/ZEgT3BWwbLD5qEKcY06jKuPEl6zC7gPSehoLw4= -k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ= -k8s.io/apimachinery v0.0.0-20191016225534-b1267f8c42b4 h1:XRoHQDFMktgtZM+TTvV8nYcp2hXHhnx1E0RJOUX3t/E= -k8s.io/apimachinery v0.0.0-20191016225534-b1267f8c42b4/go.mod h1:92mWDd8Ji2sw2157KIgino5wCxffA8KSvhW2oY4ypdw= -k8s.io/apiserver v0.0.0-20181026151315-13cfe3978170/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= -k8s.io/apiserver v0.0.0-20181213151703-3ccfe8365421/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w= -k8s.io/apiserver v0.0.0-20190918200908-1e17798da8c1/go.mod h1:4FuDU+iKPjdsdQSN3GsEKZLB/feQsj1y9dhhBDVV2Ns= -k8s.io/apiserver v0.0.0-20191016112112-5190913f932d/go.mod h1:7OqfAolfWxUM/jJ/HBLyE+cdaWFBUoo5Q5pHgJVj2ws= -k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= -k8s.io/cli-runtime v0.0.0-20181213153952-835b10687cb6/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM= -k8s.io/cli-runtime v0.0.0-20191016114015-74ad18325ed5/go.mod h1:sDl6WKSQkDM6zS1u9F49a0VooQ3ycYFBFLqd2jf2Xfo= -k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 h1:aE8wOCKuoRs2aU0OP/Rz8SXiAB0FTTku3VtGhhrkSmc= -k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/client-go v0.0.0-20190918200256-06eb1244587a/go.mod h1:3YAcTbI2ArBRmhHns5vlHRX8YQqvkVYpz+U/N5i1mVU= -k8s.io/client-go v0.0.0-20191016111102-bec269661e48 h1:C2XVy2z0dV94q9hSSoCuTPp1KOG7IegvbdXuz9VGxoU= -k8s.io/client-go v0.0.0-20191016111102-bec269661e48/go.mod 
h1:hrwktSwYGI4JK+TJA3dMaFyyvHVi/aLarVHpbs8bgCU= -k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/cloud-provider v0.0.0-20191016115326-20453efc2458/go.mod h1:O5SO5xcgxrjJV9EC9R/47RuBpbk5YX9URDBlg++FA5o= -k8s.io/cluster-bootstrap v0.0.0-20191016115129-c07a134afb42/go.mod h1:MzCL6kLExQuHruGaqibd8cugC8nw8QRxm3+lzR5l8SI= -k8s.io/code-generator v0.0.0-20181203235156-f8cba74510f3/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8= -k8s.io/code-generator v0.0.0-20190612205613-18da4a14b22b/go.mod h1:G8bQwmHm2eafm5bgtX67XDZQ8CWKSGu9DekI+yN4Y5I= -k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894/go.mod h1:mJUgkl06XV4kstAnLHAIzJPVCOzVR+ZcfPIv4fUsFCY= -k8s.io/component-base v0.0.0-20190918200425-ed2f0867c778/go.mod h1:DFWQCXgXVLiWtzFaS17KxHdlUeUymP7FLxZSkmL9/jU= -k8s.io/component-base v0.0.0-20191016111319-039242c015a9/go.mod h1:SuWowIgd/dtU/m/iv8OD9eOxp3QZBBhTIiWMsBQvKjI= -k8s.io/cri-api v0.0.0-20190828162817-608eb1dad4ac/go.mod h1:BvtUaNBr0fEpzb11OfrQiJLsLPtqbmulpo1fPwcpP6Q= -k8s.io/csi-translation-lib v0.0.0-20191016115521-756ffa5af0bd/go.mod h1:lf1VBseeLanBpSXD0N9tuPx1ylI8sA0j6f+rckCKiIk= -k8s.io/gengo v0.0.0-20181106084056-51747d6e00da/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20181113154421-fd15ee9cc2f7/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a h1:QoHVuRquf80YZ+/bovwxoMO3Q/A3nt3yTgS0/0nejuk= -k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20191010091904-7fa3014cb28f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/helm v2.13.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= -k8s.io/helm v2.16.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68= -k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-aggregator v0.0.0-20181204002017-122bac39d429/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= -k8s.io/kube-aggregator v0.0.0-20181213152105-1e8cd453c474/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU= -k8s.io/kube-aggregator v0.0.0-20191016112429-9587704a8ad4/go.mod h1:+aW0UZgSXdTSHTIFnWnueEuXjOqerDUxGIw6Ygr+vYY= -k8s.io/kube-controller-manager v0.0.0-20191016114939-2b2b218dc1df/go.mod 
h1:WgrTcPKYAfNa9C0LV1UeK+XqfbSOUH1WGq/vX5UiW40= -k8s.io/kube-openapi v0.0.0-20181031203759-72693cb1fadd/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= -k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 h1:5sW+fEHvlJI3Ngolx30CmubFulwH28DhKjGf70Xmtco= -k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d h1:Xpe6sK+RY4ZgCTyZ3y273UmFmURhjtoJiwOMbQsXitY= -k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-proxy v0.0.0-20191016114407-2e83b6f20229/go.mod h1:2Hxci1uzXO5ipP0h9n2+h18fvNkBTpYlckk5dOPu8zg= -k8s.io/kube-scheduler v0.0.0-20191016114748-65049c67a58b/go.mod h1:BgDUHHC5Wl0xcBUQgo2XEprE5nG5i9tlRR4iNgEFbL0= -k8s.io/kube-state-metrics v1.6.0 h1:6wkxiTPTZ4j+LoMWuT+rZ+OlUWswktzXs5yHmggmAZ0= -k8s.io/kube-state-metrics v1.6.0/go.mod h1:84+q9aGVQPzXYGgtvyhZr/fSI6BdLsbPWXn37RASc9k= -k8s.io/kube-state-metrics v1.7.2 h1:6vdtgXrrRRMSgnyDmgua+qvgCYv954JNfxXAtDkeLVQ= -k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+IjYA/E= -k8s.io/kubectl v0.0.0-20191016120415-2ed914427d51/go.mod h1:gL826ZTIfD4vXTGlmzgTbliCAT9NGiqpCqK2aNYv5MQ= -k8s.io/kubelet v0.0.0-20191016114556-7841ed97f1b2/go.mod h1:SBvrtLbuePbJygVXGGCMtWKH07+qrN2dE1iMnteSG8E= -k8s.io/kubernetes v1.11.7-beta.0.0.20181219023948-b875d52ea96d/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/kubernetes v1.11.8-beta.0.0.20190124204751-3a10094374f2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/kubernetes v1.16.0/go.mod h1:nlP2zevWKRGKuaaVbKIwozU0Rjg9leVDXkL4YTtjmVs= -k8s.io/kubernetes v1.16.2/go.mod h1:SmhGgKfQ30imqjFVj8AI+iW+zSyFsswNErKYeTfgoH0= -k8s.io/legacy-cloud-providers v0.0.0-20191016115753-cf0698c3a16b/go.mod h1:tKW3pKqdRW8pMveUTpF5pJuCjQxg6a25iLo+Z9BXVH0= -k8s.io/metrics v0.0.0-20191016113814-3b1a734dba6e/go.mod h1:ve7/vMWeY5lEBkZf6Bt5TTbGS3b8wAxwGbdXAsufjRs= -k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3/go.mod h1:+G1xBfZDfVFsm1Tj/HNCvg4QqWx8rJ2Fxpqr1rqp/gQ= -k8s.io/sample-apiserver v0.0.0-20191016112829-06bb3c9d77c9/go.mod h1:sXltHZrQa4jdKL14nOFRRUhhzpmbnRF0qGuAhRQbaxc= -k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191010214722-8d271d903fe4 h1:Gi+/O1saihwDqnlmC8Vhv1M5Sp4+rbOmK9TbsLn8ZEA= -k8s.io/utils v0.0.0-20191010214722-8d271d903fe4/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil 
v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -rsc.io/letsencrypt v0.0.1/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= -sigs.k8s.io/controller-runtime v0.1.12 h1:ovDq28E64PeY1yR+6H7DthakIC09soiDCrKvfP2tPYo= -sigs.k8s.io/controller-runtime v0.1.12/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= -sigs.k8s.io/controller-runtime v0.3.0 h1:ZtdgqJXVHsIytjdmDuk0QjagnzyLq9FjojXRqIp+dU4= -sigs.k8s.io/controller-runtime v0.3.0/go.mod h1:Cw6PkEg0Sa7dAYovGT4R0tRkGhHXpYijwNxYhAnAZZk= -sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg= -sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= -sigs.k8s.io/controller-tools v0.1.11-0.20190411181648-9d55346c2bde h1:ZkaHf5rNYzIB6CB82keKMQNv7xxkqT0ylOBdfJPfi+k= -sigs.k8s.io/controller-tools v0.1.11-0.20190411181648-9d55346c2bde/go.mod h1:ATWLRP3WGxuAN9HcT2LaKHReXIH+EZGzRuMHuxjXfhQ= -sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= -sigs.k8s.io/testing_frameworks v0.1.0/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= -sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= -sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= -vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/cmd/bpa-operator/pkg/apis/addtoscheme_bpa_v1alpha1.go b/cmd/bpa-operator/pkg/apis/addtoscheme_bpa_v1alpha1.go deleted file mode 100644 index ef79c5c..0000000 --- a/cmd/bpa-operator/pkg/apis/addtoscheme_bpa_v1alpha1.go +++ /dev/null @@ -1,10 +0,0 @@ -package apis - -import ( - "github.com/bpa-operator/pkg/apis/bpa/v1alpha1" -) - -func init() { - // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back - AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) -} diff --git a/cmd/bpa-operator/pkg/apis/apis.go b/cmd/bpa-operator/pkg/apis/apis.go deleted file mode 100644 index 07dc961..0000000 --- a/cmd/bpa-operator/pkg/apis/apis.go +++ /dev/null @@ -1,13 +0,0 @@ -package apis - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -// AddToSchemes may be used to add all resources defined in the project to a Scheme -var AddToSchemes runtime.SchemeBuilder - -// AddToScheme adds all Resources to the Scheme -func AddToScheme(s *runtime.Scheme) error { - return AddToSchemes.AddToScheme(s) -} diff --git a/cmd/bpa-operator/pkg/apis/bpa/group.go b/cmd/bpa-operator/pkg/apis/bpa/group.go deleted file mode 100644 index 28de849..0000000 --- a/cmd/bpa-operator/pkg/apis/bpa/group.go 
+++ /dev/null @@ -1,6 +0,0 @@ -// Package bpa contains bpa API versions. -// -// This file ensures Go source parsers acknowledge the bpa package -// and any child packages. It can be removed if any other Go source files are -// added to this package. -package bpa diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/doc.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/doc.go deleted file mode 100644 index 3e94580..0000000 --- a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package v1alpha1 contains API Schema definitions for the bpa v1alpha1 API group -// +k8s:deepcopy-gen=package,register -// +groupName=bpa.akraino.org -package v1alpha1 diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go deleted file mode 100644 index da92dd2..0000000 --- a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go +++ /dev/null @@ -1,70 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// ProvisioningSpec defines the desired state of Provisioning -// +k8s:openapi-gen=true -type ProvisioningSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file - // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html - Masters []map[string]Master `json:"masters,omitempty"` - Workers []map[string]Worker `json:"workers,omitempty"` - KUDPlugins []string `json:"KUDPlugins,omitempty"` - PodSubnet string `json:"PodSubnet,omitempty"` -} - -// ProvisioningStatus defines the observed state of Provisioning -// +k8s:openapi-gen=true -type ProvisioningStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file - // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Provisioning is the Schema for the provisionings API -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -type Provisioning struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ProvisioningSpec `json:"spec,omitempty"` - Status ProvisioningStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ProvisioningList contains a list of Provisioning -type ProvisioningList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Provisioning `json:"items"` -} - -// master struct contains resource requirements for a master node -type Master struct { - MACaddress string `json:"mac-address,omitempty"` - CPU int32 `json:"cpu,omitempty"` - Memory string `json:"memory,omitempty"` -} - -// worker struct contains resource requirements for a worker node -type Worker struct { - MACaddress string `json:"mac-address,omitempty"` - CPU int32 `json:"cpu,omitempty"` - Memory string `json:"memory,omitempty"` - SRIOV bool `json:"sriov,omitempty"` - QAT bool `json:"qat,omitempty"` -} - -func init() { - SchemeBuilder.Register(&Provisioning{}, &ProvisioningList{}) -} diff --git 
a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/register.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/register.go deleted file mode 100644 index 3dc2439..0000000 --- a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/register.go +++ /dev/null @@ -1,19 +0,0 @@ -// NOTE: Boilerplate only. Ignore this file. - -// Package v1alpha1 contains API Schema definitions for the bpa v1alpha1 API group -// +k8s:deepcopy-gen=package,register -// +groupName=bpa.akraino.org -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "bpa.akraino.org", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/software_types.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/software_types.go deleted file mode 100644 index ba79c8f..0000000 --- a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/software_types.go +++ /dev/null @@ -1,53 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// SoftwareSpec defines the desired state of Software -// +k8s:openapi-gen=true -type SoftwareSpec struct { - // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster - // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file - // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html - //MasterSoftware map[string][]interface{} `json:"masterSoftware,omitempty"` - MasterSoftware []interface{} `json:"masterSoftware,omitempty"` - WorkerSoftware []interface{} `json:"workerSoftware,omitempty"` -} - -// SoftwareStatus defines the observed state of Software -// +k8s:openapi-gen=true -type SoftwareStatus struct { - // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster - // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file - // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Software is the Schema for the softwares API -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -type Software struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec SoftwareSpec `json:"spec,omitempty"` - Status SoftwareStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SoftwareList contains a list of Software -type SoftwareList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Software `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Software{}, &SoftwareList{}) -} diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.deepcopy.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 878b20d..0000000 --- a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,253 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by operator-sdk. DO NOT EDIT. 
- -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Master) DeepCopyInto(out *Master) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Master. -func (in *Master) DeepCopy() *Master { - if in == nil { - return nil - } - out := new(Master) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Provisioning) DeepCopyInto(out *Provisioning) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provisioning. -func (in *Provisioning) DeepCopy() *Provisioning { - if in == nil { - return nil - } - out := new(Provisioning) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Provisioning) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProvisioningList) DeepCopyInto(out *ProvisioningList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Provisioning, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningList. -func (in *ProvisioningList) DeepCopy() *ProvisioningList { - if in == nil { - return nil - } - out := new(ProvisioningList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ProvisioningList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProvisioningSpec) DeepCopyInto(out *ProvisioningSpec) { - *out = *in - if in.Masters != nil { - in, out := &in.Masters, &out.Masters - *out = make([]map[string]Master, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = make(map[string]Master, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - } - } - if in.Workers != nil { - in, out := &in.Workers, &out.Workers - *out = make([]map[string]Worker, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = make(map[string]Worker, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningSpec. -func (in *ProvisioningSpec) DeepCopy() *ProvisioningSpec { - if in == nil { - return nil - } - out := new(ProvisioningSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProvisioningStatus) DeepCopyInto(out *ProvisioningStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningStatus. -func (in *ProvisioningStatus) DeepCopy() *ProvisioningStatus { - if in == nil { - return nil - } - out := new(ProvisioningStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Software) DeepCopyInto(out *Software) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Software. -func (in *Software) DeepCopy() *Software { - if in == nil { - return nil - } - out := new(Software) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Software) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SoftwareList) DeepCopyInto(out *SoftwareList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Software, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareList. -func (in *SoftwareList) DeepCopy() *SoftwareList { - if in == nil { - return nil - } - out := new(SoftwareList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SoftwareList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SoftwareSpec) DeepCopyInto(out *SoftwareSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareSpec. -func (in *SoftwareSpec) DeepCopy() *SoftwareSpec { - if in == nil { - return nil - } - out := new(SoftwareSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SoftwareStatus) DeepCopyInto(out *SoftwareStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareStatus. -func (in *SoftwareStatus) DeepCopy() *SoftwareStatus { - if in == nil { - return nil - } - out := new(SoftwareStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Worker) DeepCopyInto(out *Worker) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker. 
-func (in *Worker) DeepCopy() *Worker { - if in == nil { - return nil - } - out := new(Worker) - in.DeepCopyInto(out) - return out -} diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.openapi.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.openapi.go deleted file mode 100644 index 558f68a..0000000 --- a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.openapi.go +++ /dev/null @@ -1,85 +0,0 @@ -// +build ! - -// This file was autogenerated by openapi-gen. Do not edit it manually! - -package v1alpha1 - -import ( - spec "github.com/go-openapi/spec" - common "k8s.io/kube-openapi/pkg/common" -) - -func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { - return map[string]common.OpenAPIDefinition{ - "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.Provisioning": schema_pkg_apis_bpa_v1alpha1_Provisioning(ref), - "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningSpec": schema_pkg_apis_bpa_v1alpha1_ProvisioningSpec(ref), - "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningStatus": schema_pkg_apis_bpa_v1alpha1_ProvisioningStatus(ref), - } -} - -func schema_pkg_apis_bpa_v1alpha1_Provisioning(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Provisioning is the Schema for the provisionings API", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningSpec", "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_bpa_v1alpha1_ProvisioningSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ProvisioningSpec defines the desired state of Provisioning", - Properties: map[string]spec.Schema{}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_bpa_v1alpha1_ProvisioningStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ProvisioningStatus defines the observed state of Provisioning", - Properties: map[string]spec.Schema{}, - }, - }, - Dependencies: []string{}, - } -} diff --git a/cmd/bpa-operator/pkg/controller/add_provisioning.go b/cmd/bpa-operator/pkg/controller/add_provisioning.go deleted file mode 100644 index 4f853cd..0000000 --- a/cmd/bpa-operator/pkg/controller/add_provisioning.go +++ /dev/null @@ -1,10 +0,0 @@ -package controller - -import ( - "github.com/bpa-operator/pkg/controller/provisioning" -) - -func init() { - // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
- AddToManagerFuncs = append(AddToManagerFuncs, provisioning.Add) -} diff --git a/cmd/bpa-operator/pkg/controller/controller.go b/cmd/bpa-operator/pkg/controller/controller.go deleted file mode 100644 index 7c069f3..0000000 --- a/cmd/bpa-operator/pkg/controller/controller.go +++ /dev/null @@ -1,18 +0,0 @@ -package controller - -import ( - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// AddToManagerFuncs is a list of functions to add all Controllers to the Manager -var AddToManagerFuncs []func(manager.Manager) error - -// AddToManager adds all Controllers to the Manager -func AddToManager(m manager.Manager) error { - for _, f := range AddToManagerFuncs { - if err := f(m); err != nil { - return err - } - } - return nil -} diff --git a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go deleted file mode 100644 index 0ad5a47..0000000 --- a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go +++ /dev/null @@ -1,864 +0,0 @@ -package provisioning - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" - "time" - - bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" - - "golang.org/x/crypto/ssh" - "gopkg.in/ini.v1" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -type VirtletVM struct { - IPaddress string - MACaddress string -} - -type NetworksStatus struct { - Name string `json:"name,omitempty"` - Interface string `json:"interface,omitempty"` - Ips []string `json:"ips,omitempty"` - Mac string `json:"mac,omitempty"` - Default bool `json:"default,omitempty"` - Dns interface{} `json:"dns,omitempty"` -} - -var log = logf.Log.WithName("controller_provisioning") - -/** -* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller -* business logic. Delete these comments after modifying this file.* - */ - -// Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
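[Editorial note] A sketch of how the AddToManagerFuncs chain above is typically driven; this is not taken from the removed cmd/manager/main.go. apis.AddToScheme is assumed to follow the usual operator-sdk scaffold in pkg/apis/apis.go.

package main

import (
	"github.com/bpa-operator/pkg/apis"
	"github.com/bpa-operator/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func main() {
	cfg, err := config.GetConfig()
	if err != nil {
		panic(err)
	}

	mgr, err := manager.New(cfg, manager.Options{})
	if err != nil {
		panic(err)
	}

	// Register the bpa.akraino.org API types, then every controller that
	// appended itself to AddToManagerFuncs (provisioning.Add via the init()
	// in add_provisioning.go).
	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
		panic(err)
	}
	if err := controller.AddToManager(mgr); err != nil {
		panic(err)
	}

	// A real main would then block in mgr.Start(...) until shutdown.
}
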
-func Add(mgr manager.Manager) error { - return add(mgr, newReconciler(mgr)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) reconcile.Reconciler { - - config, err := config.GetConfig() - if err != nil { - fmt.Printf("Could not get kube config, Error: %v\n", err) - } - - clientSet, err := kubernetes.NewForConfig(config) - if err != nil { - fmt.Printf("Could not create clientset, Error: %v\n", err) - } - bmhDynamicClient, err := dynamic.NewForConfig(config) - - if err != nil { - fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err) - } - - return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient} -} - -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to primary resource Provisioning - err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return err - } - - // Watch for changes to resource configmap created as a consequence of the provisioning CR - err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &bpav1alpha1.Provisioning{}, - }) - - if err != nil { - return err - } - - //Watch for changes to job resource also created as a consequence of the provisioning CR - err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &bpav1alpha1.Provisioning{}, - }) - - if err != nil { - return err - } - - // Watch for changes to resource software CR - err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return err - } - - return nil -} - -// blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler -var _ reconcile.Reconciler = &ReconcileProvisioning{} - -// ReconcileProvisioning reconciles a Provisioning object -type ReconcileProvisioning struct { - // This client, initialized using mgr.Client() above, is a split client - // that reads objects from the cache and writes to the apiserver - client client.Client - scheme *runtime.Scheme - clientset kubernetes.Interface - bmhClient dynamic.Interface -} - -// Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read -// and what is in the Provisioning.Spec -// TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates -// a Pod as an example -// Note: -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
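[Editorial note] One caveat worth illustrating about the two EnqueueRequestForOwner watches registered above: they only fire for ConfigMaps and Jobs that carry a controller owner reference pointing back at the Provisioning CR. As quoted further down, createConfigMap and createKUDinstallerJob build their objects with only a name and labels, so a hypothetical helper like the following (not present in the original controller) would be needed for those watches to re-trigger reconciliation and for garbage collection to tie the Job to its CR.

package provisioning

import (
	bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// setJobOwner is a hypothetical helper: it marks the Provisioning CR as the
// controlling owner of a Job, which is what EnqueueRequestForOwner keys on.
func setJobOwner(cr *bpav1alpha1.Provisioning, job *batchv1.Job, scheme *runtime.Scheme) error {
	return controllerutil.SetControllerReference(cr, job, scheme)
}
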
-func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) { - reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) - fmt.Printf("\n\n") - reqLogger.Info("Reconciling Custom Resource") - - // Fetch the Provisioning instance - provisioningInstance := &bpav1alpha1.Provisioning{} - softwareInstance := &bpav1alpha1.Software{} - err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance) - provisioningCreated := true - if err != nil { - - //Check if its a Software Instance - err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance) - if err != nil { - if errors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - return reconcile.Result{}, nil - } - - // Error reading the object - requeue the request. - return reconcile.Result{}, err - } - - //No error occured and so a Software CR was created not a Provisoning CR - provisioningCreated = false - } - - masterTag := "MASTER_" - workerTag := "WORKER_" - - if provisioningCreated { - - /////////////////////////////////////////////////////////////////////////////////////////////// - //////////////// Provisioning CR was created so install KUD ///////////////// - ////////////////////////////////////////////////////////////////////////////////////////////// - clusterName := provisioningInstance.Labels["cluster"] - clusterType := provisioningInstance.Labels["cluster-type"] - mastersList := provisioningInstance.Spec.Masters - workersList := provisioningInstance.Spec.Workers - kudPlugins := provisioningInstance.Spec.KUDPlugins - podSubnet := provisioningInstance.Spec.PodSubnet - - bareMetalHostList, _ := listBareMetalHosts(r.bmhClient) - virtletVMList, _ := listVirtletVMs(r.clientset) - - var allString string - var masterString string - var workerString string - - multiClusterDir := "/multi-cluster" - - //Create Directory for the specific cluster - clusterDir := multiClusterDir + "/" + clusterName - os.MkdirAll(clusterDir, os.ModePerm) - - //Create Maps to be used for cluster ip address to label configmap - clusterLabel := make(map[string]string) - clusterLabel["cluster"] = clusterName - clusterData := make(map[string]string) - - //Iterate through mastersList and get all the mac addresses and IP addresses - for _, masterMap := range mastersList { - - for masterLabel, master := range masterMap { - masterMAC := master.MACaddress - hostIPaddress := "" - - if masterMAC == "" { - err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel) - return reconcile.Result{}, err - } - - containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC) - - //Check 'cluster-type' label for Virtlet VMs - if clusterType == "virtlet-vm" { - //Get VM IP address of master - hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC) - if err != nil || hostIPaddress == "" { - err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC) - return reconcile.Result{}, err - } - containsMac = true - } - - if containsMac { - - if clusterType != "virtlet-vm" { - fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC) - - //Get IP address of master - hostIPaddress, err = getHostIPaddress(bareMetalHostList, masterMAC) - if err != nil || hostIPaddress == "" { - err = fmt.Errorf("IP address not found for host with MAC address 
%s \n", masterMAC) - return reconcile.Result{}, err - } - allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" - } - - if clusterType == "virtlet-vm" { - allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n" - } - masterString += masterLabel + "\n" - clusterData[masterTag+masterLabel] = hostIPaddress - - fmt.Printf("%s : %s \n", hostIPaddress, masterMAC) - - if len(workersList) != 0 { - - //Iterate through workersList and get all the mac addresses - for _, workerMap := range workersList { - - //Get worker labels from the workermap - for workerLabel, worker := range workerMap { - - //Check if workerString already contains worker label - containsWorkerLabel := strings.Contains(workerString, workerLabel) - workerMAC := worker.MACaddress - hostIPaddress = "" - - //Error occurs if the same label is given to different hosts (assumption, - //each MAC address represents a unique host - if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" { - if containsWorkerLabel { - strings.ReplaceAll(workerString, workerLabel, "") - } - err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a - different label to node with MACAddress %s`, workerLabel, workerMAC) - return reconcile.Result{}, err - - //same node performs worker and master roles - } else if workerLabel == masterLabel && !containsWorkerLabel { - workerString += workerLabel + "\n" - - //Add host to ip address config map with worker tag - hostIPaddress = clusterData[masterTag+masterLabel] - clusterData[workerTag+masterLabel] = hostIPaddress - - //Error occurs if the same node is given different labels - } else if workerLabel != masterLabel && workerMAC == masterMAC { - if containsWorkerLabel { - strings.ReplaceAll(workerString, workerLabel, "") - } - err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a - different label to node with MACAddress %s`, workerLabel, workerMAC) - return reconcile.Result{}, err - - //worker node is different from any master node and it has not been added to the worker list - } else if workerLabel != masterLabel && !containsWorkerLabel { - - // Error occurs if MAC address not provided for worker node not matching master - if workerMAC == "" { - err = fmt.Errorf("MAC address for worker %s not provided", workerLabel) - return reconcile.Result{}, err - } - - containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC) - - if clusterType == "virtlet-vm" { - //Get VM IP address of master - hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC) - if err != nil || hostIPaddress == "" { - err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC) - return reconcile.Result{}, err - } - containsMac = true - } - - if containsMac { - - if clusterType != "virtlet-vm" { - fmt.Printf("Host %s matches that macAddress\n", bmhCR) - - //Get IP address of worker - hostIPaddress, err = getHostIPaddress(bareMetalHostList, workerMAC) - if err != nil { - fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC) - return reconcile.Result{}, err - } - allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" - } - fmt.Printf("%s : %s \n", hostIPaddress, workerMAC) - - if clusterType == "virtlet-vm" { - allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" 
+ "\n" - } - workerString += workerLabel + "\n" - clusterData[workerTag+workerLabel] = hostIPaddress - - //No host found that matches the worker MAC - } else { - - err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC) - return reconcile.Result{}, err - } - } - } - } - //No worker node specified, add master as worker node - } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) { - workerString += masterLabel + "\n" - - //Add host to ip address config map with worker tag - hostIPaddress = clusterData[masterTag+masterLabel] - clusterData[workerTag+masterLabel] = hostIPaddress - } - - //No host matching master MAC found - } else { - err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC) - return reconcile.Result{}, err - } - } - } - - //Create host.ini file - //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini" - iniHostFilePath := clusterDir + "/hosts.ini" - newFile, err := os.Create(iniHostFilePath) - defer newFile.Close() - - if err != nil { - fmt.Printf("Error occured while creating file \n %v", err) - return reconcile.Result{}, err - } - - hostFile, err := ini.Load(iniHostFilePath) - if err != nil { - fmt.Printf("Error occured while Loading file \n %v", err) - return reconcile.Result{}, err - } - - _, err = hostFile.NewRawSection("all", allString) - if err != nil { - fmt.Printf("Error occured while creating section \n %v", err) - return reconcile.Result{}, err - } - _, err = hostFile.NewRawSection("kube-master", masterString) - if err != nil { - fmt.Printf("Error occured while creating section \n %v", err) - return reconcile.Result{}, err - } - - _, err = hostFile.NewRawSection("kube-node", workerString) - if err != nil { - fmt.Printf("Error occured while creating section \n %v", err) - return reconcile.Result{}, err - } - - _, err = hostFile.NewRawSection("etcd", masterString) - if err != nil { - fmt.Printf("Error occured while creating section \n %v", err) - return reconcile.Result{}, err - } - - if clusterType != "virtlet-vm" { - _, err = hostFile.NewRawSection("ovn-central", masterString) - if err != nil { - fmt.Printf("Error occured while creating section \n %v", err) - return reconcile.Result{}, err - } - - _, err = hostFile.NewRawSection("ovn-controller", workerString) - if err != nil { - fmt.Printf("Error occured while creating section \n %v", err) - return reconcile.Result{}, err - } - - _, err = hostFile.NewRawSection("virtlet", workerString) - if err != nil { - fmt.Printf("Error occured while creating section \n %v", err) - return reconcile.Result{}, err - } - } - _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master") - if err != nil { - fmt.Printf("Error occured while creating section \n %v", err) - return reconcile.Result{}, err - } - - //Create host.ini file for KUD - hostFile.SaveTo(iniHostFilePath) - - //Install KUD - err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, podSubnet, kudPlugins, r.clientset) - if err != nil { - fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err) - return reconcile.Result{}, err - } - - //Start separate thread to keep checking job status, Create an IP address configmap - //for cluster if KUD is successfully installed - go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset) - - return reconcile.Result{}, nil - - } - - /////////////////////////////////////////////////////////////////////////////////////////////// - //////////////// Software CR was 
created so install software ///////////////// - ////////////////////////////////////////////////////////////////////////////////////////////// - softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance) - defaultSSHPrivateKey := "/root/.ssh/id_rsa" - - //Get IP address configmap for the cluster - clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset) - if err != nil { - fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err) - return reconcile.Result{}, err - } - - for hostLabel, ipAddress := range clusterConfigMapData { - - if strings.Contains(hostLabel, masterTag) { - // Its a master node, install master software - err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList) - if err != nil { - fmt.Printf("Error occured while installing master software in host %s, ERROR: %v\n", hostLabel, err) - } - } else if strings.Contains(hostLabel, workerTag) { - // Its a worker node, install worker software - err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList) - if err != nil { - fmt.Printf("Error occured while installing worker software in host %s, ERROR: %v\n", hostLabel, err) - } - - } - - } - - return reconcile.Result{}, nil -} - -//Function to Get List containing baremetal hosts -func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) { - - //Create GVR representing a BareMetalHost CR - bmhGVR := schema.GroupVersionResource{ - Group: "metal3.io", - Version: "v1alpha1", - Resource: "baremetalhosts", - } - - //Get List containing all BareMetalHosts CRs - bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{}) - if err != nil { - fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err) - return &unstructured.UnstructuredList{}, err - } - - return bareMetalHosts, nil -} - -//Function to check if BareMetalHost containing MAC address exist -func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) { - - //Convert macAddress to byte array for comparison - macAddressByte := []byte(macAddress) - macBool := false - - for _, bareMetalHost := range bareMetalHostList.Items { - bmhJson, _ := bareMetalHost.MarshalJSON() - - macBool = bytes.Contains(bmhJson, macAddressByte) - if macBool { - return macBool, bareMetalHost.GetName() - } - - } - - return macBool, "" - -} - -//Function to get the IP address of a host from the BareMetalHost resource -func getHostIPaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (string, error) { - - for _, bareMetalHost := range bareMetalHostList.Items { - status, ok := bareMetalHost.Object["status"].(map[string]interface{}) - if !ok { - continue - } - hardware, ok := status["hardware"].(map[string]interface{}) - if !ok { - continue - } - nics, ok := hardware["nics"].([]interface{}) - if !ok { - continue - } - for _, nic := range nics { - n, ok := nic.(map[string]interface{}) - if !ok { - continue - } - ip, ok := n["ip"].(string) - if !ok { - continue - } - if macAddress == n["mac"] { - return ip, nil - } - } - } - return "", nil -} - -//Function to create configmap -func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error { - - configmapClient := clientset.CoreV1().ConfigMaps(namespace) - - configmap := &corev1.ConfigMap{ - - ObjectMeta: metav1.ObjectMeta{ - Name: labels["cluster"] + 
"-configmap", - Labels: labels, - }, - Data: data, - } - - _, err := configmapClient.Create(configmap) - if err != nil { - return err - - } - return nil - -} - -//Function to get configmap Data -func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) { - - configmapClient := clientset.CoreV1().ConfigMaps(namespace) - configmapName := clusterName + "-configmap" - clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - configmapData := clusterConfigmap.Data - return configmapData, nil -} - -//Function to create job for KUD installation -func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, podSubnet string, kudPlugins []string, clientset kubernetes.Interface) error { - - var backOffLimit int32 = 0 - var privi bool = true - - installerString := " ./installer --cluster " + clusterName - if len(podSubnet) > 0 { - installerString += " --network " + podSubnet - } - - // Check if any plugin was specified - if len(kudPlugins) > 0 { - plugins := " --plugins" - - for _, plug := range kudPlugins { - plugins += " " + plug - } - - installerString += plugins - } - - jobClient := clientset.BatchV1().Jobs("default") - - job := &batchv1.Job{ - - ObjectMeta: metav1.ObjectMeta{ - Name: "kud-" + clusterName, - Labels: labels, - }, - Spec: batchv1.JobSpec{ - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: labels, - }, - - Spec: corev1.PodSpec{ - HostNetwork: true, - Containers: []corev1.Container{{ - Name: "kud", - Image: "github.com/onap/multicloud-k8s:latest", - ImagePullPolicy: "IfNotPresent", - VolumeMounts: []corev1.VolumeMount{{ - Name: "multi-cluster", - MountPath: "/opt/kud/multi-cluster", - }, - { - Name: "secret-volume", - MountPath: "/.ssh", - }, - }, - EnvFrom: []corev1.EnvFromSource{ - { - ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "kud-installer"}}, - }, - }, - Command: []string{"/bin/sh", "-c"}, - Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString}, - SecurityContext: &corev1.SecurityContext{ - Privileged: &privi, - }, - }, - }, - Volumes: []corev1.Volume{{ - Name: "multi-cluster", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/opt/kud/multi-cluster", - }}}, - { - Name: "secret-volume", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "ssh-key-secret", - }, - }}}, - RestartPolicy: "Never", - }, - }, - BackoffLimit: &backOffLimit, - }, - } - _, err := jobClient.Create(job) - if err != nil { - fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err) - return err - } - return nil - -} - -//Function to Check if job succeeded -func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) { - - fmt.Printf("\nChecking job status for cluster %s\n", clusterName) - jobName := "kud-" + clusterName - jobClient := clientset.BatchV1().Jobs(namespace) - - for { - time.Sleep(2 * time.Second) - - job, err := jobClient.Get(jobName, metav1.GetOptions{}) - if err != nil { - fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName) - return - } - jobSucceeded := job.Status.Succeeded - jobFailed := job.Status.Failed - - if jobSucceeded == 1 { - fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName) - - //KUD was installed successfully create configmap to store IP 
address info for the cluster - err = createConfigMap(data, labels, namespace, clientset) - if err != nil { - fmt.Printf("Error occured while creating Ip address configmap for cluster %v\n ERROR: %v", clusterName, err) - return - } - return - } - - if jobFailed == 1 { - fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName) - return - } - - } - return - -} - -//Function to get software list from software CR -func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) { - - CRclusterName := softwareCR.GetLabels()["cluster"] - - masterSofwareList := softwareCR.Spec.MasterSoftware - workerSoftwareList := softwareCR.Spec.WorkerSoftware - - return CRclusterName, masterSofwareList, workerSoftwareList -} - -//Function to install software in clusterHosts -func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error { - - var installString string - for _, software := range softwareList { - - switch t := software.(type) { - case string: - installString += software.(string) + " " - case interface{}: - softwareMap, errBool := software.(map[string]interface{}) - if !errBool { - fmt.Printf("Error occured, cannot install software %v\n", software) - } - for softwareName, versionMap := range softwareMap { - - versionMAP, _ := versionMap.(map[string]interface{}) - version := versionMAP["version"].(string) - installString += softwareName + "=" + version + " " - } - default: - fmt.Printf("invalid format %v\n", t) - } - - } - - err := sshInstaller(installString, sshPrivateKey, ipAddress) - if err != nil { - return err - } - return nil - -} - -//Function to Run Installation commands via ssh -func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error { - - buffer, err := ioutil.ReadFile(sshPrivateKey) - if err != nil { - return err - } - - key, err := ssh.ParsePrivateKey(buffer) - if err != nil { - return err - } - - sshConfig := &ssh.ClientConfig{ - User: "root", - Auth: []ssh.AuthMethod{ - ssh.PublicKeys(key), - }, - - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - } - - client, err := ssh.Dial("tcp", ipAddress+":22", sshConfig) - if err != nil { - return err - } - - session, err := client.NewSession() - if err != nil { - return err - } - - defer session.Close() - defer client.Close() - - cmd := "sudo apt-get update && apt-get install " + softwareString + "-y" - err = session.Start(cmd) - - if err != nil { - return err - } - - return nil - -} - -func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) { - - var vmPodList []VirtletVM - - pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{}) - if err != nil { - fmt.Printf("Could not get pod info, Error: %v\n", err) - return []VirtletVM{}, err - } - - for _, pod := range pods.Items { - var podAnnotation map[string]interface{} - var podStatus corev1.PodStatus - var podDefaultNetStatus []NetworksStatus - - annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations()) - if err != nil { - fmt.Printf("Could not get pod annotations, Error: %v\n", err) - return []VirtletVM{}, err - } - - json.Unmarshal([]byte(annotation), &podAnnotation) - if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil { - runtime := podAnnotation["kubernetes.io/target-runtime"].(string) - - podStatusJson, _ := json.Marshal(pod.Status) - json.Unmarshal([]byte(podStatusJson), &podStatus) - - if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil { - ns := 
podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string) - json.Unmarshal([]byte(ns), &podDefaultNetStatus) - - vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac}) - } - } - } - - return vmPodList, nil -} - -func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) { - - for i := 0; i < len(vmList); i++ { - if vmList[i].MACaddress == macAddress { - return vmList[i].IPaddress, nil - } - } - return "", nil -} diff --git a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller_test.go b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller_test.go deleted file mode 100644 index 6bd18b1..0000000 --- a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package provisioning - -import ( - "os" - "testing" - - bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - fakedynamic "k8s.io/client-go/dynamic/fake" - fakeclientset "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" -) - -func TestProvisioningController(t *testing.T) { - - logf.SetLogger(logf.ZapLogger(true)) - bpaName1 := "bpa-test-cr" - bpaName2 := "bpa-test-2" - namespace := "default" - clusterName := "test-cluster" - clusterName2 := "test-cluster-2" - clusterName3 := "test-cluster-3" - macAddress1 := "08:00:27:00:ab:2c" - macAddress2 := "08:00:27:00:ab:3d" - - // Create Fake baremetalhost - bmhList := newBMList() - - // Create Fake Provisioning CR - provisioning := newBPA(bpaName1, namespace, clusterName, macAddress1) - provisioning2 := newBPA(bpaName2, namespace, clusterName2, macAddress2) - - // Objects to track in the fake Client - objs := []runtime.Object{provisioning, provisioning2} - - // Register operator types with the runtime scheme - sc := scheme.Scheme - - sc.AddKnownTypes(bpav1alpha1.SchemeGroupVersion, provisioning, provisioning2) - - // Create Fake Clients and Clientset - fakeClient := fake.NewFakeClient(objs...) 
- fakeDyn := fakedynamic.NewSimpleDynamicClient(sc, bmhList) - fakeClientSet := fakeclientset.NewSimpleClientset() - - r := &ReconcileProvisioning{client: fakeClient, scheme: sc, clientset: fakeClientSet, bmhClient: fakeDyn} - - // Mock request to simulate Reconcile() being called on an event for a watched resource - req := simulateRequest(provisioning) - _, err := r.Reconcile(req) - if err != nil { - t.Fatalf("reconcile: (%v)", err) - } - - // Test 1: Check the job was created with the expected name - jobClient := r.clientset.BatchV1().Jobs(namespace) - job, err := jobClient.Get("kud-test-cluster", metav1.GetOptions{}) - - if err != nil { - t.Fatalf("Error occured while getting job: (%v)", err) - } - - // Test 2: Check that cluster name metadata in job is the expected cluster name - jobClusterName := job.Labels["cluster"] - if jobClusterName != clusterName { - t.Fatalf("Job cluster Name is wrong") - } - - // Test 3: Check that the right error is produced when host with MAC address does not exist - req = simulateRequest(provisioning2) - _, err = r.Reconcile(req) - expectedErr := "Host with MAC Address " + macAddress2 + " not found\n" - if err.Error() != expectedErr { - t.Fatalf("Failed, Unexpected error occured %v\n", err) - } - - // Delete cluster directories - err = os.RemoveAll("/multi-cluster/" + clusterName) - if err != nil { - t.Logf("\nUnable to delete cluster directory %s\n", clusterName) - } - err = os.RemoveAll("/multi-cluster/" + clusterName2) - if err != nil { - t.Logf("\nUnable to delete cluster directory %s\n", clusterName2) - } - err = os.RemoveAll("/multi-cluster/" + clusterName3) - if err != nil { - t.Logf("\nUnable to delete cluster directory %s\n", clusterName3) - } - -} - -func simulateRequest(bpaCR *bpav1alpha1.Provisioning) reconcile.Request { - namespacedName := types.NamespacedName{ - Name: bpaCR.ObjectMeta.Name, - Namespace: bpaCR.ObjectMeta.Namespace, - } - return reconcile.Request{NamespacedName: namespacedName} -} - -func newBPA(name, namespace, clusterName, macAddress string) *bpav1alpha1.Provisioning { - - provisioningCR := &bpav1alpha1.Provisioning{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: map[string]string{ - "cluster": clusterName, - }, - }, - Spec: bpav1alpha1.ProvisioningSpec{ - Masters: []map[string]bpav1alpha1.Master{ - map[string]bpav1alpha1.Master{ - "test-master": bpav1alpha1.Master{ - MACaddress: macAddress, - }, - }, - }, - }, - } - return provisioningCR -} - -func newBMList() *unstructured.UnstructuredList { - - bmMap := map[string]interface{}{ - "apiVersion": "metal3.io/v1alpha1", - "kind": "BareMetalHostList", - "metaData": map[string]interface{}{ - "continue": "", - "resourceVersion": "11830058", - "selfLink": "/apis/metal3.io/v1alpha1/baremetalhosts", - }, - } - - metaData := map[string]interface{}{ - "creationTimestamp": "2019-10-24T04:51:15Z", - "generation": "1", - "name": "fake-test-bmh", - "namespace": "default", - "resourceVersion": "11829263", - "selfLink": "/apis/metal3.io/v1alpha1/namespaces/default/baremetalhosts/bpa-test-bmh", - "uid": "e92cb312-f619-11e9-90bc-00219ba0c77a", - } - - nicMap1 := map[string]interface{}{ - "ip": "192.168.50.63", - "mac": "08:00:27:00:ab:2c", - "model": "0x8086 0x1572", - "name": "eth3", - "pxe": "false", - "speedGbps": "0", - "vlanId": "0", - } - - nicMap2 := map[string]interface{}{ - "ip": "192.168.60.73", - "mac": "08:00:27:00:ab:1c", - "model": "0x8086 0x1572", - "name": "eth4", - "pxe": "false", - "speedGbps": "0", - "vlanId": "0", - } - - specMap := 
map[string]interface{}{ - "bootMACAddress": "08:00:27:00:ab:2c", - } - - statusMap := map[string]interface{}{ - "errorMessage": "", - "hardware": map[string]interface{}{ - "nics": []interface{}{ - nicMap1, - nicMap2, - }, - }, - } - - itemMap := map[string]interface{}{ - "apiVersion": "metal3.io/v1alpha1", - "kind": "BareMetalHost", - "metadata": metaData, - "spec": specMap, - "status": statusMap, - } - itemU := unstructured.Unstructured{ - Object: itemMap, - } - - itemsList := []unstructured.Unstructured{itemU} - - bmhList := &unstructured.UnstructuredList{ - Object: bmMap, - Items: itemsList, - } - - return bmhList -} diff --git a/cmd/bpa-operator/tools.go b/cmd/bpa-operator/tools.go deleted file mode 100644 index 648413f..0000000 --- a/cmd/bpa-operator/tools.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build tools - -package tools - -import ( - _ "sigs.k8s.io/controller-tools/pkg/crd/generator" -) diff --git a/cmd/bpa-operator/vendor/cloud.google.com/go/AUTHORS b/cmd/bpa-operator/vendor/cloud.google.com/go/AUTHORS deleted file mode 100644 index c364af1..0000000 --- a/cmd/bpa-operator/vendor/cloud.google.com/go/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name or Organization -# The email address is not required for organizations. - -Filippo Valsorda -Google Inc. -Ingo Oeser -Palm Stone Games, Inc. -Paweł Knap -Péter Szilágyi -Tyler Treat diff --git a/cmd/bpa-operator/vendor/cloud.google.com/go/CONTRIBUTORS b/cmd/bpa-operator/vendor/cloud.google.com/go/CONTRIBUTORS deleted file mode 100644 index 3b3cbed..0000000 --- a/cmd/bpa-operator/vendor/cloud.google.com/go/CONTRIBUTORS +++ /dev/null @@ -1,40 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name - -# Keep the list alphabetically sorted. - -Alexis Hunt -Andreas Litt -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Sansome -David Symonds -Filippo Valsorda -Glenn Lewis -Ingo Oeser -James Hall -Johan Euphrosine -Jonathan Amsterdam -Kunpei Sakai -Luna Duclos -Magnus Hiie -Mario Castro -Michael McGreevy -Omar Jarjur -Paweł Knap -Péter Szilágyi -Sarah Adams -Thanatat Tamtan -Toby Burress -Tuo Shan -Tyler Treat diff --git a/cmd/bpa-operator/vendor/cloud.google.com/go/LICENSE b/cmd/bpa-operator/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index d645695..0000000 --- a/cmd/bpa-operator/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cmd/bpa-operator/vendor/cloud.google.com/go/compute/metadata/metadata.go b/cmd/bpa-operator/vendor/cloud.google.com/go/compute/metadata/metadata.go deleted file mode 100644 index 125b703..0000000 --- a/cmd/bpa-operator/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2014 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metadata provides access to Google Compute Engine (GCE) -// metadata and API service accounts. -// -// This package is a wrapper around the GCE metadata service, -// as documented at https://developers.google.com/compute/docs/metadata. -package metadata // import "cloud.google.com/go/compute/metadata" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strings" - "sync" - "time" -) - -const ( - // metadataIP is the documented metadata server IP address. - metadataIP = "169.254.169.254" - - // metadataHostEnv is the environment variable specifying the - // GCE metadata hostname. If empty, the default value of - // metadataIP ("169.254.169.254") is used instead. - // This is variable name is not defined by any spec, as far as - // I know; it was made up for the Go package. - metadataHostEnv = "GCE_METADATA_HOST" - - userAgent = "gcloud-golang/0.1" -) - -type cachedValue struct { - k string - trim bool - mu sync.Mutex - v string -} - -var ( - projID = &cachedValue{k: "project/project-id", trim: true} - projNum = &cachedValue{k: "project/numeric-project-id", trim: true} - instID = &cachedValue{k: "instance/id", trim: true} -) - -var ( - defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }} - subscribeClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }} -) - -// NotDefinedError is returned when requested metadata is not defined. -// -// The underlying string is the suffix after "/computeMetadata/v1/". -// -// This error is not returned if the value is defined to be the empty -// string. -type NotDefinedError string - -func (suffix NotDefinedError) Error() string { - return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) -} - -func (c *cachedValue) get(cl *Client) (v string, err error) { - defer c.mu.Unlock() - c.mu.Lock() - if c.v != "" { - return c.v, nil - } - if c.trim { - v, err = cl.getTrimmed(c.k) - } else { - v, err = cl.Get(c.k) - } - if err == nil { - c.v = v - } - return -} - -var ( - onGCEOnce sync.Once - onGCE bool -) - -// OnGCE reports whether this process is running on Google Compute Engine. -func OnGCE() bool { - onGCEOnce.Do(initOnGCE) - return onGCE -} - -func initOnGCE() { - onGCE = testOnGCE() -} - -func testOnGCE() bool { - // The user explicitly said they're on GCE, so trust them. 
- if os.Getenv(metadataHostEnv) != "" { - return true - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - resc := make(chan bool, 2) - - // Try two strategies in parallel. - // See https://github.com/googleapis/google-cloud-go/issues/194 - go func() { - req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) - req.Header.Set("User-Agent", userAgent) - res, err := defaultClient.hc.Do(req.WithContext(ctx)) - if err != nil { - resc <- false - return - } - defer res.Body.Close() - resc <- res.Header.Get("Metadata-Flavor") == "Google" - }() - - go func() { - addrs, err := net.LookupHost("metadata.google.internal") - if err != nil || len(addrs) == 0 { - resc <- false - return - } - resc <- strsContains(addrs, metadataIP) - }() - - tryHarder := systemInfoSuggestsGCE() - if tryHarder { - res := <-resc - if res { - // The first strategy succeeded, so let's use it. - return true - } - // Wait for either the DNS or metadata server probe to - // contradict the other one and say we are running on - // GCE. Give it a lot of time to do so, since the system - // info already suggests we're running on a GCE BIOS. - timer := time.NewTimer(5 * time.Second) - defer timer.Stop() - select { - case res = <-resc: - return res - case <-timer.C: - // Too slow. Who knows what this system is. - return false - } - } - - // There's no hint from the system info that we're running on - // GCE, so use the first probe's result as truth, whether it's - // true or false. The goal here is to optimize for speed for - // users who are NOT running on GCE. We can't assume that - // either a DNS lookup or an HTTP request to a blackholed IP - // address is fast. Worst case this should return when the - // metaClient's Transport.ResponseHeaderTimeout or - // Transport.Dial.Timeout fires (in two seconds). - return <-resc -} - -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - -// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no -// ResponseHeaderTimeout). -func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return subscribeClient.Subscribe(suffix, fn) -} - -// Get calls Client.Get on the default client. -func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } - -// ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } - -// NumericProjectID returns the current instance's numeric project ID. -func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } - -// InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } - -// ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } - -// Hostname returns the instance's hostname. This will be of the form -// "<instanceID>.c.<projID>.internal".
-func Hostname() (string, error) { return defaultClient.Hostname() } - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } - -// InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } - -// InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } - -// Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } - -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } - -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } - -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. -func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) -} - -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. -func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) -} - -// Scopes calls Client.Scopes on the default client. -func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } - -func strsContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -// A Client provides metadata. -type Client struct { - hc *http.Client -} - -// NewClient returns a Client that can be used to fetch metadata. All HTTP requests -// will use the given http.Client instead of the default client. -func NewClient(c *http.Client) *Client { - return &Client{hc: c} -} - -// getETag returns a value from the metadata service as well as the associated ETag. -// This func is otherwise equivalent to Get. -func (c *Client) getETag(suffix string) (value, etag string, err error) { - // Using a fixed IP makes it very difficult to spoof the metadata service in - // a container, which is an important use-case for local testing of cloud - // deployments. To enable spoofing of the metadata service, the environment - // variable GCE_METADATA_HOST is first inspected to decide where metadata - // requests shall go. - host := os.Getenv(metadataHostEnv) - if host == "" { - // Using 169.254.169.254 instead of "metadata" here because Go - // binaries built with the "netgo" tag and without cgo won't - // know the search suffix for "metadata" is - // ".google.internal", and this IP address is documented as - // being stable anyway. - host = metadataIP - } - u := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", u, nil) - req.Header.Set("Metadata-Flavor", "Google") - req.Header.Set("User-Agent", userAgent) - res, err := c.hc.Do(req) - if err != nil { - return "", "", err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return "", "", NotDefinedError(suffix) - } - all, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", err - } - if res.StatusCode != 200 { - return "", "", &Error{Code: res.StatusCode, Message: string(all)} - } - return string(all), res.Header.Get("Etag"), nil -} - -// Get returns a value from the metadata service. 
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// -// If the GCE_METADATA_HOST environment variable is not defined, a default of -// 169.254.169.254 will be used instead. -// -// If the requested metadata is not defined, the returned error will -// be of type NotDefinedError. -func (c *Client) Get(suffix string) (string, error) { - val, _, err := c.getETag(suffix) - return val, err -} - -func (c *Client) getTrimmed(suffix string) (s string, err error) { - s, err = c.Get(suffix) - s = strings.TrimSpace(s) - return -} - -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.Get(suffix) - if err != nil { - return nil, err - } - s := strings.Split(strings.TrimSpace(j), "\n") - for i := range s { - s[i] = strings.TrimSpace(s[i]) - } - return s, nil -} - -// ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } - -// NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } - -// InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } - -// InternalIP returns the instance's primary internal IP address. -func (c *Client) InternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/ip") -} - -// ExternalIP returns the instance's primary external (public) IP address. -func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") -} - -// Hostname returns the instance's hostname. This will be of the form -// "<instanceID>.c.<projID>.internal". -func (c *Client) Hostname() (string, error) { - return c.getTrimmed("instance/hostname") -} - -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. -func (c *Client) InstanceTags() ([]string, error) { - var s []string - j, err := c.Get("instance/tags") - if err != nil { - return nil, err - } - if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { - return nil, err - } - return s, nil -} - -// InstanceName returns the current VM's instance ID string. -func (c *Client) InstanceName() (string, error) { - host, err := c.Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil -} - -// Zone returns the current VM's zone, such as "us-central1-b". -func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed("instance/zone") - // zone is of the form "projects/<projNum>/zones/<zoneName>". - if err != nil { - return "", err - } - return zone[strings.LastIndex(zone, "/")+1:], nil -} - -// InstanceAttributes returns the list of user-defined attributes, -// assigned when initially creating a GCE VM instance. The value of an -// attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } - -// ProjectAttributes returns the list of user-defined attributes -// applying to the project as a whole, not just this VM. The value of -// an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } - -// InstanceAttributeValue returns the value of the provided VM -// instance attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError.
-// -// InstanceAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.Get("instance/attributes/" + attr) -} - -// ProjectAttributeValue returns the value of the provided -// project attribute. -// -// If the requested attribute is not defined, the returned error will -// be of type NotDefinedError. -// -// ProjectAttributeValue may return ("", nil) if the attribute was -// defined to be the empty string. -func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.Get("project/attributes/" + attr) -} - -// Scopes returns the service account scopes for the given account. -// The account may be empty or the string "default" to use the instance's -// main account. -func (c *Client) Scopes(serviceAccount string) ([]string, error) { - if serviceAccount == "" { - serviceAccount = "default" - } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") -} - -// Subscribe subscribes to a value from the metadata service. -// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". -// The suffix may contain query parameters. -// -// Subscribe calls fn with the latest metadata value indicated by the provided -// suffix. If the metadata value is deleted, fn is called with the empty string -// and ok false. Subscribe blocks until fn returns a non-nil error or the value -// is deleted. Subscribe returns the error value returned from the last call to -// fn, which may be nil when ok == false. -func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { - const failedSubscribeSleep = time.Second * 5 - - // First check to see if the metadata value exists at all. - val, lastETag, err := c.getETag(suffix) - if err != nil { - return err - } - - if err := fn(val, true); err != nil { - return err - } - - ok := true - if strings.ContainsRune(suffix, '?') { - suffix += "&wait_for_change=true&last_etag=" - } else { - suffix += "?wait_for_change=true&last_etag=" - } - for { - val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) - if err != nil { - if _, deleted := err.(NotDefinedError); !deleted { - time.Sleep(failedSubscribeSleep) - continue // Retry on other errors. - } - ok = false - } - lastETag = etag - - if err := fn(val, ok); err != nil || !ok { - return err - } - } -} - -// Error contains an error response from the server. -type Error struct { - // Code is the HTTP response status code. - Code int - // Message is the server response message. - Message string -} - -func (e *Error) Error() string { - return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) -} diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml deleted file mode 100644 index ee417bb..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.11.x - -go_import_path: contrib.go.opencensus.io/exporter/ocagent - -before_script: - - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any - - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any - -script: - - go build ./... 
# Ensure dependency updates don't break build - - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - - go vet ./... - - GO111MODULE=on go test -v -race $PKGS # Run all the tests with the race detector enabled - - GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules. - - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md deleted file mode 100644 index 0786fdf..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md +++ /dev/null @@ -1,24 +0,0 @@ -# How to contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution, -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. Consult [GitHub Help] for more -information on using pull requests. - -[GitHub Help]: https://help.github.com/articles/about-pull-requests/ diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md deleted file mode 100644 index 3b9e908..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# OpenCensus Agent Go Exporter - -[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url] - - -This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter. -OC-Agent is a deamon process running in a VM that can retrieve spans/stats/metrics from -OpenCensus Library, export them to other backends and possibly push configurations back to -Library. See more details on [OC-Agent Readme][OCAgentReadme]. - -Note: This is an experimental repository and is likely to get backwards-incompatible changes. -Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo]. 
- -## Installation - -```bash -$ go get -u contrib.go.opencensus.io/exporter/ocagent -``` - -## Usage - -```go -import ( - "context" - "fmt" - "log" - "time" - - "contrib.go.opencensus.io/exporter/ocagent" - "go.opencensus.io/trace" -) - -func Example() { - exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name")) - if err != nil { - log.Fatalf("Failed to create the agent exporter: %v", err) - } - defer exp.Stop() - - // Now register it as a trace exporter. - trace.RegisterExporter(exp) - - // Then use the OpenCensus tracing library, like we normally would. - ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example") - defer span.End() - - for i := 0; i < 10; i++ { - _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i)) - <-time.After(6 * time.Millisecond) - iSpan.End() - } -} -``` - -[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto -[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go -[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg -[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent -[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master -[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent - diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go deleted file mode 100644 index 297e44b..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "math/rand" - "time" -) - -var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) - -// retries function fn upto n times, if fn returns an error lest it returns nil early. -// It applies exponential backoff in units of (1< 0 { - ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) - } - traceExporter, err := traceSvcClient.Export(ctx) - if err != nil { - return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) - } - - firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{ - Node: node, - Resource: ae.resource, - } - if err := traceExporter.Send(firstTraceMessage); err != nil { - return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) - } - - ae.mu.Lock() - ae.traceExporter = traceExporter - ae.mu.Unlock() - - // Initiate the config service by sending over node identifier info. 
- configStream, err := traceSvcClient.Config(context.Background()) - if err != nil { - return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) - } - firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node} - if err := configStream.Send(firstCfgMessage); err != nil { - return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) - } - - // In the background, handle trace configurations that are beamed down - // by the agent, but also reply to it with the applied configuration. - go ae.handleConfigStreaming(configStream) - - return nil -} - -func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { - metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc) - metricsExporter, err := metricsSvcClient.Export(context.Background()) - if err != nil { - return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err) - } - // Initiate the metrics service by sending over the first message just containing the Node and Resource. - firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{ - Node: node, - Resource: ae.resource, - } - if err := metricsExporter.Send(firstMetricsMessage); err != nil { - return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err) - } - - ae.mu.Lock() - ae.metricsExporter = metricsExporter - ae.mu.Unlock() - - // With that we are good to go and can start sending metrics - return nil -} - -func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { - addr := ae.prepareAgentAddress() - var dialOpts []grpc.DialOption - if ae.clientTransportCredentials != nil { - dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials)) - } else if ae.canDialInsecure { - dialOpts = append(dialOpts, grpc.WithInsecure()) - } - if ae.compressor != "" { - dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor))) - } - dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) - - ctx := context.Background() - if len(ae.headers) > 0 { - ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) - } - return grpc.DialContext(ctx, addr, dialOpts...) -} - -func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { - // Note: We haven't yet implemented configuration sending so we - // should NOT be changing connection states within this function for now. - for { - recv, err := configStream.Recv() - if err != nil { - // TODO: Check if this is a transient error or exponential backoff-able. 
- return err - } - cfg := recv.Config - if cfg == nil { - continue - } - - // Otherwise now apply the trace configuration sent down from the agent - if psamp := cfg.GetProbabilitySampler(); psamp != nil { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) - } else if csamp := cfg.GetConstantSampler(); csamp != nil { - alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON - if alwaysSample { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - } else { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) - } - } else { // TODO: Add the rate limiting sampler here - } - - // Then finally send back to upstream the newly applied configuration - err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) - if err != nil { - return err - } - } -} - -// Stop shuts down all the connections and resources -// related to the exporter. -func (ae *Exporter) Stop() error { - ae.mu.RLock() - cc := ae.grpcClientConn - started := ae.started - stopped := ae.stopped - ae.mu.RUnlock() - - if !started { - return errNotStarted - } - if stopped { - // TODO: tell the user that we've already stopped, so perhaps a sentinel error? - return nil - } - - ae.Flush() - - // Now close the underlying gRPC connection. - var err error - if cc != nil { - err = cc.Close() - } - - // At this point we can change the state variables: started and stopped - ae.mu.Lock() - ae.started = false - ae.stopped = true - ae.mu.Unlock() - close(ae.stopCh) - - // Ensure that the backgroundConnector returns - <-ae.backgroundConnectionDoneCh - - return err -} - -func (ae *Exporter) ExportSpan(sd *trace.SpanData) { - if sd == nil { - return - } - _ = ae.traceBundler.Add(sd, 1) -} - -func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error { - if batch == nil || len(batch.Spans) == 0 { - return nil - } - - select { - case <-ae.stopCh: - return errStopped - - default: - if !ae.connected() { - return errNoConnection - } - - ae.senderMu.Lock() - err := ae.traceExporter.Send(batch) - ae.senderMu.Unlock() - if err != nil { - ae.setStateDisconnected() - return err - } - return nil - } -} - -func (ae *Exporter) ExportView(vd *view.Data) { - if vd == nil { - return - } - _ = ae.viewDataBundler.Add(vd, 1) -} - -func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span { - if len(sdl) == 0 { - return nil - } - protoSpans := make([]*tracepb.Span, 0, len(sdl)) - for _, sd := range sdl { - if sd != nil { - protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) - } - } - return protoSpans -} - -func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { - select { - case <-ae.stopCh: - return - - default: - if !ae.connected() { - return - } - - protoSpans := ocSpanDataToPbSpans(sdl) - if len(protoSpans) == 0 { - return - } - ae.senderMu.Lock() - err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ - Spans: protoSpans, - }) - ae.senderMu.Unlock() - if err != nil { - ae.setStateDisconnected() - } - } -} - -func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric { - if len(vdl) == 0 { - return nil - } - metrics := make([]*metricspb.Metric, 0, len(vdl)) - for _, vd := range vdl { - if vd != nil { - vmetric, err := viewDataToMetric(vd) - // TODO: (@odeke-em) somehow report this error, if it is non-nil. 
- if err == nil && vmetric != nil { - metrics = append(metrics, vmetric) - } - } - } - return metrics -} - -func (ae *Exporter) uploadViewData(vdl []*view.Data) { - select { - case <-ae.stopCh: - return - - default: - if !ae.connected() { - return - } - - protoMetrics := ocViewDataToPbMetrics(vdl) - if len(protoMetrics) == 0 { - return - } - err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{ - Metrics: protoMetrics, - // TODO:(@odeke-em) - // a) Figure out how to derive a Node from the environment - // b) Figure out how to derive a Resource from the environment - // or better letting users of the exporter configure it. - }) - if err != nil { - ae.setStateDisconnected() - } - } -} - -func (ae *Exporter) Flush() { - ae.traceBundler.Flush() - ae.viewDataBundler.Flush() -} - -func resourceProtoFromEnv() *resourcepb.Resource { - rs, _ := resource.FromEnv(context.Background()) - if rs == nil { - return nil - } - - rprs := &resourcepb.Resource{ - Type: rs.Type, - } - if rs.Labels != nil { - rprs.Labels = make(map[string]string) - for k, v := range rs.Labels { - rprs.Labels[k] = v - } - } - return rprs -} diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go deleted file mode 100644 index 3e05ae8..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "time" - - "google.golang.org/grpc/credentials" -) - -const ( - DefaultAgentPort uint16 = 55678 - DefaultAgentHost string = "localhost" -) - -type ExporterOption interface { - withExporter(e *Exporter) -} - -type insecureGrpcConnection int - -var _ ExporterOption = (*insecureGrpcConnection)(nil) - -func (igc *insecureGrpcConnection) withExporter(e *Exporter) { - e.canDialInsecure = true -} - -// WithInsecure disables client transport security for the exporter's gRPC connection -// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure -// does. Note, by default, client security is required unless WithInsecure is used. -func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } - -type addressSetter string - -func (as addressSetter) withExporter(e *Exporter) { - e.agentAddress = string(as) -} - -var _ ExporterOption = (*addressSetter)(nil) - -// WithAddress allows one to set the address that the exporter will -// connect to the agent on. 
If unset, it will instead try to use -// connect to DefaultAgentHost:DefaultAgentPort -func WithAddress(addr string) ExporterOption { - return addressSetter(addr) -} - -type serviceNameSetter string - -func (sns serviceNameSetter) withExporter(e *Exporter) { - e.serviceName = string(sns) -} - -var _ ExporterOption = (*serviceNameSetter)(nil) - -// WithServiceName allows one to set/override the service name -// that the exporter will report to the agent. -func WithServiceName(serviceName string) ExporterOption { - return serviceNameSetter(serviceName) -} - -type reconnectionPeriod time.Duration - -func (rp reconnectionPeriod) withExporter(e *Exporter) { - e.reconnectionPeriod = time.Duration(rp) -} - -func WithReconnectionPeriod(rp time.Duration) ExporterOption { - return reconnectionPeriod(rp) -} - -type compressorSetter string - -func (c compressorSetter) withExporter(e *Exporter) { - e.compressor = string(c) -} - -// UseCompressor will set the compressor for the gRPC client to use when sending requests. -// It is the responsibility of the caller to ensure that the compressor set has been registered -// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some -// compressors auto-register on import, such as gzip, which can be registered by calling -// `import _ "google.golang.org/grpc/encoding/gzip"` -func UseCompressor(compressorName string) ExporterOption { - return compressorSetter(compressorName) -} - -type headerSetter map[string]string - -func (h headerSetter) withExporter(e *Exporter) { - e.headers = map[string]string(h) -} - -// WithHeaders will send the provided headers when the gRPC stream connection -// is instantiated -func WithHeaders(headers map[string]string) ExporterOption { - return headerSetter(headers) -} - -type clientCredentials struct { - credentials.TransportCredentials -} - -var _ ExporterOption = (*clientCredentials)(nil) - -// WithTLSCredentials allows the connection to use TLS credentials -// when talking to the server. It takes in grpc.TransportCredentials instead -// of say a Certificate file or a tls.Certificate, because the retrieving -// these credentials can be done in many ways e.g. plain file, in code tls.Config -// or by certificate rotation, so it is up to the caller to decide what to use. -func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption { - return &clientCredentials{TransportCredentials: creds} -} - -func (cc *clientCredentials) withExporter(e *Exporter) { - e.clientTransportCredentials = cc.TransportCredentials -} diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go deleted file mode 100644 index 983ebe7..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ocagent - -import ( - "math" - "time" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/tracestate" - - tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - maxAnnotationEventsPerSpan = 32 - maxMessageEventsPerSpan = 128 -) - -func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { - if sd == nil { - return nil - } - var namePtr *tracepb.TruncatableString - if sd.Name != "" { - namePtr = &tracepb.TruncatableString{Value: sd.Name} - } - return &tracepb.Span{ - TraceId: sd.TraceID[:], - SpanId: sd.SpanID[:], - ParentSpanId: sd.ParentSpanID[:], - Status: ocStatusToProtoStatus(sd.Status), - StartTime: timeToTimestamp(sd.StartTime), - EndTime: timeToTimestamp(sd.EndTime), - Links: ocLinksToProtoLinks(sd.Links), - Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), - Name: namePtr, - Attributes: ocAttributesToProtoAttributes(sd.Attributes), - TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents), - Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), - } -} - -var blankStatus trace.Status - -func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { - if status == blankStatus { - return nil - } - return &tracepb.Status{ - Code: status.Code, - Message: status.Message, - } -} - -func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { - if len(links) == 0 { - return nil - } - - sl := make([]*tracepb.Span_Link, 0, len(links)) - for _, ocLink := range links { - // This redefinition is necessary to prevent ocLink.*ID[:] copies - // being reused -- in short we need a new ocLink per iteration. - ocLink := ocLink - - sl = append(sl, &tracepb.Span_Link{ - TraceId: ocLink.TraceID[:], - SpanId: ocLink.SpanID[:], - Type: ocLinkTypeToProtoLinkType(ocLink.Type), - }) - } - - return &tracepb.Span_Links{ - Link: sl, - } -} - -func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { - switch oct { - case trace.LinkTypeChild: - return tracepb.Span_Link_CHILD_LINKED_SPAN - case trace.LinkTypeParent: - return tracepb.Span_Link_PARENT_LINKED_SPAN - default: - return tracepb.Span_Link_TYPE_UNSPECIFIED - } -} - -func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { - if len(attrs) == 0 { - return nil - } - outMap := make(map[string]*tracepb.AttributeValue) - for k, v := range attrs { - switch v := v.(type) { - case bool: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} - - case int: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} - - case int64: - outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} - - case string: - outMap[k] = &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{ - StringValue: &tracepb.TruncatableString{Value: v}, - }, - } - } - } - return &tracepb.Span_Attributes{ - AttributeMap: outMap, - } -} - -// This code is mostly copied from -// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46 -func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents { - if len(as) == 0 && len(es) == 0 { - return nil - } - - timeEvents := &tracepb.Span_TimeEvents{} - var annotations, droppedAnnotationsCount int - var messageEvents, droppedMessageEventsCount int - - // Transform annotations - for i, a := range as { - if annotations >= maxAnnotationEventsPerSpan { - 
droppedAnnotationsCount = len(as) - i - break - } - annotations++ - timeEvents.TimeEvent = append(timeEvents.TimeEvent, - &tracepb.Span_TimeEvent{ - Time: timeToTimestamp(a.Time), - Value: transformAnnotationToTimeEvent(&a), - }, - ) - } - - // Transform message events - for i, e := range es { - if messageEvents >= maxMessageEventsPerSpan { - droppedMessageEventsCount = len(es) - i - break - } - messageEvents++ - timeEvents.TimeEvent = append(timeEvents.TimeEvent, - &tracepb.Span_TimeEvent{ - Time: timeToTimestamp(e.Time), - Value: transformMessageEventToTimeEvent(&e), - }, - ) - } - - // Process dropped counter - timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) - timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) - - return timeEvents -} - -func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ { - return &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: &tracepb.TruncatableString{Value: a.Message}, - Attributes: ocAttributesToProtoAttributes(a.Attributes), - }, - } -} - -func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ { - return &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), - Id: uint64(e.MessageID), - UncompressedSize: uint64(e.UncompressedByteSize), - CompressedSize: uint64(e.CompressedByteSize), - }, - } -} - -// clip32 clips an int to the range of an int32. -func clip32(x int) int32 { - if x < math.MinInt32 { - return math.MinInt32 - } - if x > math.MaxInt32 { - return math.MaxInt32 - } - return int32(x) -} - -func timeToTimestamp(t time.Time) *timestamp.Timestamp { - nanoTime := t.UnixNano() - return &timestamp.Timestamp{ - Seconds: nanoTime / 1e9, - Nanos: int32(nanoTime % 1e9), - } -} - -func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind { - switch kind { - case trace.SpanKindClient: - return tracepb.Span_CLIENT - case trace.SpanKindServer: - return tracepb.Span_SERVER - default: - return tracepb.Span_SPAN_KIND_UNSPECIFIED - } -} - -func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate { - if ts == nil { - return nil - } - return &tracepb.Span_Tracestate{ - Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()), - } -} - -func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry { - protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries)) - for _, entry := range entries { - protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{ - Key: entry.Key, - Value: entry.Value, - }) - } - return protoEntries -} diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go deleted file mode 100644 index 43f18de..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -import ( - "errors" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - "github.com/golang/protobuf/ptypes/timestamp" - - metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" -) - -var ( - errNilMeasure = errors.New("expecting a non-nil stats.Measure") - errNilView = errors.New("expecting a non-nil view.View") - errNilViewData = errors.New("expecting a non-nil view.Data") -) - -func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) { - if vd == nil { - return nil, errNilViewData - } - - descriptor, err := viewToMetricDescriptor(vd.View) - if err != nil { - return nil, err - } - - timeseries, err := viewDataToTimeseries(vd) - if err != nil { - return nil, err - } - - metric := &metricspb.Metric{ - MetricDescriptor: descriptor, - Timeseries: timeseries, - } - return metric, nil -} - -func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) { - if v == nil { - return nil, errNilView - } - if v.Measure == nil { - return nil, errNilMeasure - } - - desc := &metricspb.MetricDescriptor{ - Name: stringOrCall(v.Name, v.Measure.Name), - Description: stringOrCall(v.Description, v.Measure.Description), - Unit: v.Measure.Unit(), - Type: aggregationToMetricDescriptorType(v), - LabelKeys: tagKeysToLabelKeys(v.TagKeys), - } - return desc, nil -} - -func stringOrCall(first string, call func() string) string { - if first != "" { - return first - } - return call() -} - -type measureType uint - -const ( - measureUnknown measureType = iota - measureInt64 - measureFloat64 -) - -func measureTypeFromMeasure(m stats.Measure) measureType { - switch m.(type) { - default: - return measureUnknown - case *stats.Float64Measure: - return measureFloat64 - case *stats.Int64Measure: - return measureInt64 - } -} - -func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type { - if v == nil || v.Aggregation == nil { - return metricspb.MetricDescriptor_UNSPECIFIED - } - if v.Measure == nil { - return metricspb.MetricDescriptor_UNSPECIFIED - } - - switch v.Aggregation.Type { - case view.AggTypeCount: - // Cumulative on int64 - return metricspb.MetricDescriptor_CUMULATIVE_INT64 - - case view.AggTypeDistribution: - // Cumulative types - return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION - - case view.AggTypeLastValue: - // Gauge types - switch measureTypeFromMeasure(v.Measure) { - case measureFloat64: - return metricspb.MetricDescriptor_GAUGE_DOUBLE - case measureInt64: - return metricspb.MetricDescriptor_GAUGE_INT64 - } - - case view.AggTypeSum: - // Cumulative types - switch measureTypeFromMeasure(v.Measure) { - case measureFloat64: - return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE - case measureInt64: - return metricspb.MetricDescriptor_CUMULATIVE_INT64 - } - } - - // For all other cases, return unspecified. 
- return metricspb.MetricDescriptor_UNSPECIFIED -} - -func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey { - labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys)) - for _, tagKey := range tagKeys { - labelKeys = append(labelKeys, &metricspb.LabelKey{ - Key: tagKey.Name(), - }) - } - return labelKeys -} - -func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) { - if vd == nil || len(vd.Rows) == 0 { - return nil, nil - } - - // Given that view.Data only contains Start, End - // the timestamps for all the row data will be the exact same - // per aggregation. However, the values will differ. - // Each row has its own tags. - startTimestamp := timeToProtoTimestamp(vd.Start) - endTimestamp := timeToProtoTimestamp(vd.End) - - mType := measureTypeFromMeasure(vd.View.Measure) - timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows)) - // It is imperative that the ordering of "LabelValues" matches those - // of the Label keys in the metric descriptor. - for _, row := range vd.Rows { - labelValues := labelValuesFromTags(row.Tags) - point := rowToPoint(vd.View, row, endTimestamp, mType) - timeseries = append(timeseries, &metricspb.TimeSeries{ - StartTimestamp: startTimestamp, - LabelValues: labelValues, - Points: []*metricspb.Point{point}, - }) - } - - if len(timeseries) == 0 { - return nil, nil - } - - return timeseries, nil -} - -func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp { - unixNano := t.UnixNano() - return &timestamp.Timestamp{ - Seconds: int64(unixNano / 1e9), - Nanos: int32(unixNano % 1e9), - } -} - -func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point { - pt := &metricspb.Point{ - Timestamp: endTimestamp, - } - - switch data := row.Data.(type) { - case *view.CountData: - pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value} - - case *view.DistributionData: - pt.Value = &metricspb.Point_DistributionValue{ - DistributionValue: &metricspb.DistributionValue{ - Count: data.Count, - Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count - // TODO: Add Exemplar - Buckets: bucketsToProtoBuckets(data.CountPerBucket), - BucketOptions: &metricspb.DistributionValue_BucketOptions{ - Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ - Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ - Bounds: v.Aggregation.Buckets, - }, - }, - }, - SumOfSquaredDeviation: data.SumOfSquaredDev, - }} - - case *view.LastValueData: - setPointValue(pt, data.Value, mType) - - case *view.SumData: - setPointValue(pt, data.Value, mType) - } - - return pt -} - -// Not returning anything from this function because metricspb.Point.is_Value is an unexported -// interface hence we just have to set its value by pointer.
-func setPointValue(pt *metricspb.Point, value float64, mType measureType) { - if mType == measureInt64 { - pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)} - } else { - pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value} - } -} - -func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket { - distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket)) - for i := 0; i < len(countPerBucket); i++ { - count := countPerBucket[i] - - distBuckets[i] = &metricspb.DistributionValue_Bucket{ - Count: count, - } - } - - return distBuckets -} - -func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue { - if len(tags) == 0 { - return nil - } - - labelValues := make([]*metricspb.LabelValue, 0, len(tags)) - for _, tag_ := range tags { - labelValues = append(labelValues, &metricspb.LabelValue{ - Value: tag_.Value, - - // It is imperative that we set the "HasValue" attribute, - // in order to distinguish missing a label from the empty string. - // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue - // - // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments, - // so the best case that we can use to distinguish missing labels/tags from the - // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have - // a value. - HasValue: tag_.Key.Name() != "", - }) - } - return labelValues -} diff --git a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go deleted file mode 100644 index 68be4c7..0000000 --- a/cmd/bpa-operator/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package ocagent - -const Version = "0.0.1" diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/LICENSE b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/LICENSE deleted file mode 100644 index b9d6a27..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/README.md deleted file mode 100644 index 7b0c4bc..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ /dev/null @@ -1,292 +0,0 @@ -# Azure Active Directory authentication for Go - -This is a standalone package for authenticating with Azure Active -Directory from other Go libraries and applications, in particular the [Azure SDK -for Go](https://github.com/Azure/azure-sdk-for-go). 
- -Note: Despite the package's name it is not related to other "ADAL" libraries -maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues -should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) -or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue -trackers. - -## Install - -```bash -go get -u github.com/Azure/go-autorest/autorest/adal -``` - -## Usage - -An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). - -### Register an Azure AD Application with secret - - -1. Register a new application with a `secret` credential - - ``` - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --password secret - ``` - -2. Create a service principal using the `Application ID` from previous step - - ``` - az ad sp create --id "Application ID" - ``` - - * Replace `Application ID` with `appId` from step 1. - -### Register an Azure AD Application with certificate - -1. Create a private key - - ``` - openssl genrsa -out "example-app.key" 2048 - ``` - -2. Create the certificate - - ``` - openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" - openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 - ``` - -3. Create the PKCS12 version of the certificate containing also the private key - - ``` - openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: - - ``` - -4. Register a new application with the certificate content form `example-app.crt` - - ``` - certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" - - az ad app create \ - --display-name example-app \ - --homepage https://example-app/home \ - --identifier-uris https://example-app/app \ - --key-usage Verify --end-date 2018-01-01 \ - --key-value "${certificateContents}" - ``` - -5. Create a service principal using the `Application ID` from previous step - - ``` - az ad sp create --id "APPLICATION_ID" - ``` - - * Replace `APPLICATION_ID` with `appId` from step 4. - - -### Grant the necessary permissions - -Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained -level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) -which can be assigned to a service principal of an Azure AD application depending of your needs. - -``` -az role assignment create --assigner "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" -``` - -* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step. -* Replace the `ROLE_NAME` with a role name of your choice. - -It is also possible to define custom role definitions. - -``` -az role definition create --role-definition role-definition.json -``` - -* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file. 
- - -### Acquire Access Token - -The common configuration used by all flows: - -```Go -const activeDirectoryEndpoint = "https://login.microsoftonline.com/" -tenantID := "TENANT_ID" -oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) - -applicationID := "APPLICATION_ID" - -callback := func(token adal.Token) error { - // This is called after the token is acquired -} - -// The resource for which the token is acquired -resource := "https://management.core.windows.net/" -``` - -* Replace the `TENANT_ID` with your tenant ID. -* Replace the `APPLICATION_ID` with the value from previous section. - -#### Client Credentials - -```Go -applicationSecret := "APPLICATION_SECRET" - -spt, err := adal.NewServicePrincipalToken( - oauthConfig, - appliationID, - applicationSecret, - resource, - callbacks...) -if err != nil { - return nil, err -} - -// Acquire a new access token -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -* Replace the `APPLICATION_SECRET` with the `password` value from previous section. - -#### Client Certificate - -```Go -certificatePath := "./example-app.pfx" - -certData, err := ioutil.ReadFile(certificatePath) -if err != nil { - return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) -} - -// Get the certificate and private key from pfx file -certificate, rsaPrivateKey, err := decodePkcs12(certData, "") -if err != nil { - return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) -} - -spt, err := adal.NewServicePrincipalTokenFromCertificate( - oauthConfig, - applicationID, - certificate, - rsaPrivateKey, - resource, - callbacks...) - -// Acquire a new access token -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -* Update the certificate path to point to the example-app.pfx file which was created in previous section. - - -#### Device Code - -```Go -oauthClient := &http.Client{} - -// Acquire the device code -deviceCode, err := adal.InitiateDeviceAuth( - oauthClient, - oauthConfig, - applicationID, - resource) -if err != nil { - return nil, fmt.Errorf("Failed to start device auth flow: %s", err) -} - -// Display the authentication message -fmt.Println(*deviceCode.Message) - -// Wait here until the user is authenticated -token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) -if err != nil { - return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) -} - -spt, err := adal.NewServicePrincipalTokenFromManualToken( - oauthConfig, - applicationID, - resource, - *token, - callbacks...) - -if (err == nil) { - token := spt.Token -} -``` - -#### Username password authenticate - -```Go -spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( - oauthConfig, - applicationID, - username, - password, - resource, - callbacks...) - -if (err == nil) { - token := spt.Token -} -``` - -#### Authorization code authenticate - -``` Go -spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( - oauthConfig, - applicationID, - clientSecret, - authorizationCode, - redirectURI, - resource, - callbacks...) - -err = spt.Refresh() -if (err == nil) { - token := spt.Token -} -``` - -### Command Line Tool - -A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. 
- -``` -adal -h - -Usage of ./adal: - -applicationId string - application id - -certificatePath string - path to pk12/PFC application certificate - -mode string - authentication mode (device, secret, cert, refresh) (default "device") - -resource string - resource for which the token is requested - -secret string - application secret - -tenantId string - tenant id - -tokenCachePath string - location of oath token cache (default "/home/cgc/.adal/accessToken.json") -``` - -Example acquire a token for `https://management.core.windows.net/` using device code flow: - -``` -adal -mode device \ - -applicationId "APPLICATION_ID" \ - -tenantId "TENANT_ID" \ - -resource https://management.core.windows.net/ - -``` diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/config.go deleted file mode 100644 index 8c83a91..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ /dev/null @@ -1,91 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/url" -) - -// OAuthConfig represents the endpoints needed -// in OAuth operations -type OAuthConfig struct { - AuthorityEndpoint url.URL `json:"authorityEndpoint"` - AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` - TokenEndpoint url.URL `json:"tokenEndpoint"` - DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` -} - -// IsZero returns true if the OAuthConfig object is zero-initialized. -func (oac OAuthConfig) IsZero() bool { - return oac == OAuthConfig{} -} - -func validateStringParam(param, name string) error { - if len(param) == 0 { - return fmt.Errorf("parameter '" + name + "' cannot be empty") - } - return nil -} - -// NewOAuthConfig returns an OAuthConfig with tenant specific urls -func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { - apiVer := "1.0" - return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer) -} - -// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls. -// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value. 
-func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) { - if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { - return nil, err - } - api := "" - // it's legal for tenantID to be empty so don't validate it - if apiVersion != nil { - if err := validateStringParam(*apiVersion, "apiVersion"); err != nil { - return nil, err - } - api = fmt.Sprintf("?api-version=%s", *apiVersion) - } - const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s" - u, err := url.Parse(activeDirectoryEndpoint) - if err != nil { - return nil, err - } - authorityURL, err := u.Parse(tenantID) - if err != nil { - return nil, err - } - authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api)) - if err != nil { - return nil, err - } - tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api)) - if err != nil { - return nil, err - } - deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api)) - if err != nil { - return nil, err - } - - return &OAuthConfig{ - AuthorityEndpoint: *authorityURL, - AuthorizeEndpoint: *authorizeURL, - TokenEndpoint: *tokenURL, - DeviceCodeEndpoint: *deviceCodeURL, - }, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go deleted file mode 100644 index b38f4c2..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go +++ /dev/null @@ -1,242 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -/* - This file is largely based on rjw57/oauth2device's code, with the follow differences: - * scope -> resource, and only allow a single one - * receive "Message" in the DeviceCode struct and show it to users as the prompt - * azure-xplat-cli has the following behavior that this emulates: - - does not send client_secret during the token exchange - - sends resource again in the token exchange request -*/ - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" -) - -const ( - logPrefix = "autorest/adal/devicetoken:" -) - -var ( - // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow - ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) - - // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow - ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) - - // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow - ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) - - // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow - ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) - - // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow - ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) - - // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow - ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) - - // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow - ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) - - errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" - errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" - errTokenSendingFails = "Error occurred while sending request with device code for a token" - errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" - errStatusNotOK = "Error HTTP status != 200" -) - -// DeviceCode is the object returned by the device auth endpoint -// It contains information to instruct the user to complete the auth flow -type DeviceCode struct { - DeviceCode *string `json:"device_code,omitempty"` - UserCode *string `json:"user_code,omitempty"` - VerificationURL *string `json:"verification_url,omitempty"` - ExpiresIn *int64 `json:"expires_in,string,omitempty"` - Interval *int64 `json:"interval,string,omitempty"` - - Message *string `json:"message"` // Azure specific - Resource string // store the following, stored when initiating, used when exchanging - OAuthConfig OAuthConfig - ClientID string -} - -// TokenError is the object returned by the token exchange endpoint -// when something is amiss -type TokenError struct { - Error *string `json:"error,omitempty"` - ErrorCodes []int `json:"error_codes,omitempty"` - ErrorDescription *string `json:"error_description,omitempty"` - Timestamp *string `json:"timestamp,omitempty"` - TraceID *string `json:"trace_id,omitempty"` -} - -// DeviceToken is the object return by the token exchange endpoint -// 
It can either look like a Token or an ErrorToken, so put both here -// and check for presence of "Error" to know if we are in error state -type deviceToken struct { - Token - TokenError -} - -// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode -// that can be used with CheckForUserCompletion or WaitForUserCompletion. -func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { - v := url.Values{ - "client_id": []string{clientID}, - "resource": []string{resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) - } - - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrDeviceCodeEmpty - } - - var code DeviceCode - err = json.Unmarshal(rb, &code) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) - } - - code.ClientID = clientID - code.Resource = resource - code.OAuthConfig = oauthConfig - - return &code, nil -} - -// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint -// to see if the device flow has: been completed, timed out, or otherwise failed -func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - v := url.Values{ - "client_id": []string{code.ClientID}, - "code": []string{*code.DeviceCode}, - "grant_type": []string{OAuthGrantTypeDeviceCode}, - "resource": []string{code.Resource}, - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - - req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - resp, err := sender.Do(req) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return nil, ErrOAuthTokenEmpty - } - - var token deviceToken - err = json.Unmarshal(rb, &token) - if err != nil { - return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) - } - - if token.Error == nil { - return &token.Token, nil - } - - switch *token.Error { - case "authorization_pending": - return nil, ErrDeviceAuthorizationPending - case "slow_down": - return nil, ErrDeviceSlowDown - case "access_denied": - return nil, ErrDeviceAccessDenied - case "code_expired": - 
return nil, ErrDeviceCodeExpired - default: - return nil, ErrDeviceGeneric - } -} - -// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. -// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. -func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { - intervalDuration := time.Duration(*code.Interval) * time.Second - waitDuration := intervalDuration - - for { - token, err := CheckForUserCompletion(sender, code) - - if err == nil { - return token, nil - } - - switch err { - case ErrDeviceSlowDown: - waitDuration += waitDuration - case ErrDeviceAuthorizationPending: - // noop - default: // everything else is "fatal" to us - return nil, err - } - - if waitDuration > (intervalDuration * 3) { - return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) - } - - time.Sleep(waitDuration) - } -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go deleted file mode 100644 index 9e15f27..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go +++ /dev/null @@ -1,73 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -// LoadToken restores a Token object from a file located at 'path'. -func LoadToken(path string) (*Token, error) { - file, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) - } - defer file.Close() - - var token Token - - dec := json.NewDecoder(file) - if err = dec.Decode(&token); err != nil { - return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) - } - return &token, nil -} - -// SaveToken persists an oauth token at the given location on disk. -// It moves the new file into place so it can safely be used to replace an existing file -// that maybe accessed by multiple processes. 
-func SaveToken(path string, mode os.FileMode, token Token) error { - dir := filepath.Dir(path) - err := os.MkdirAll(dir, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) - } - - newFile, err := ioutil.TempFile(dir, "token") - if err != nil { - return fmt.Errorf("failed to create the temp file to write the token: %v", err) - } - tempPath := newFile.Name() - - if err := json.NewEncoder(newFile).Encode(token); err != nil { - return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) - } - if err := newFile.Close(); err != nil { - return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) - } - - // Atomic replace to avoid multi-writer file corruptions - if err := os.Rename(tempPath, path); err != nil { - return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) - } - if err := os.Chmod(path, mode); err != nil { - return fmt.Errorf("failed to chmod the token file %s: %v", path, err) - } - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go deleted file mode 100644 index 834401e..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go +++ /dev/null @@ -1,60 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "net/http" -) - -const ( - contentType = "Content-Type" - mimeTypeFormPost = "application/x-www-form-urlencoded" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). 
-func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/token.go deleted file mode 100644 index effa87a..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ /dev/null @@ -1,985 +0,0 @@ -package adal - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "math" - "net" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/Azure/go-autorest/autorest/date" - "github.com/Azure/go-autorest/tracing" - "github.com/dgrijalva/jwt-go" -) - -const ( - defaultRefresh = 5 * time.Minute - - // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow - OAuthGrantTypeDeviceCode = "device_code" - - // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows - OAuthGrantTypeClientCredentials = "client_credentials" - - // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows - OAuthGrantTypeUserPass = "password" - - // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows - OAuthGrantTypeRefreshToken = "refresh_token" - - // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows - OAuthGrantTypeAuthorizationCode = "authorization_code" - - // metadataHeader is the header required by MSI extension - metadataHeader = "Metadata" - - // msiEndpoint is the well known endpoint for getting MSI authentications tokens - msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" - - // the default number of attempts to refresh an MSI authentication token - defaultMaxMSIRefreshAttempts = 5 -) - -// OAuthTokenProvider is an interface which should be implemented by an access token retriever -type OAuthTokenProvider interface { - OAuthToken() string -} - -// TokenRefreshError is an interface used by errors returned during token refresh. 
-type TokenRefreshError interface { - error - Response() *http.Response -} - -// Refresher is an interface for token refresh functionality -type Refresher interface { - Refresh() error - RefreshExchange(resource string) error - EnsureFresh() error -} - -// RefresherWithContext is an interface for token refresh functionality -type RefresherWithContext interface { - RefreshWithContext(ctx context.Context) error - RefreshExchangeWithContext(ctx context.Context, resource string) error - EnsureFreshWithContext(ctx context.Context) error -} - -// TokenRefreshCallback is the type representing callbacks that will be called after -// a successful token refresh -type TokenRefreshCallback func(Token) error - -// Token encapsulates the access token used to authorize Azure requests. -// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response -type Token struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - - ExpiresIn json.Number `json:"expires_in"` - ExpiresOn json.Number `json:"expires_on"` - NotBefore json.Number `json:"not_before"` - - Resource string `json:"resource"` - Type string `json:"token_type"` -} - -func newToken() Token { - return Token{ - ExpiresIn: "0", - ExpiresOn: "0", - NotBefore: "0", - } -} - -// IsZero returns true if the token object is zero-initialized. -func (t Token) IsZero() bool { - return t == Token{} -} - -// Expires returns the time.Time when the Token expires. -func (t Token) Expires() time.Time { - s, err := t.ExpiresOn.Float64() - if err != nil { - s = -3600 - } - - expiration := date.NewUnixTimeFromSeconds(s) - - return time.Time(expiration).UTC() -} - -// IsExpired returns true if the Token is expired, false otherwise. -func (t Token) IsExpired() bool { - return t.WillExpireIn(0) -} - -// WillExpireIn returns true if the Token will expire after the passed time.Duration interval -// from now, false otherwise. -func (t Token) WillExpireIn(d time.Duration) bool { - return !t.Expires().After(time.Now().Add(d)) -} - -//OAuthToken return the current access token -func (t *Token) OAuthToken() string { - return t.AccessToken -} - -// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form -// that is submitted when acquiring an oAuth token. -type ServicePrincipalSecret interface { - SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error -} - -// ServicePrincipalNoSecret represents a secret type that contains no secret -// meaning it is not valid for fetching a fresh token. This is used by Manual -type ServicePrincipalNoSecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret -// It only returns an error for the ServicePrincipalNoSecret type -func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") -} - -// MarshalJSON implements the json.Marshaler interface. -func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalNoSecret", - }) -} - -// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. 
-type ServicePrincipalTokenSecret struct { - ClientSecret string `json:"value"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using the client_secret. -func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("client_secret", tokenSecret.ClientSecret) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalTokenSecret", - Value: tokenSecret.ClientSecret, - }) -} - -// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. -type ServicePrincipalCertificateSecret struct { - Certificate *x509.Certificate - PrivateKey *rsa.PrivateKey -} - -// SignJwt returns the JWT signed with the certificate's private key. -func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { - hasher := sha1.New() - _, err := hasher.Write(secret.Certificate.Raw) - if err != nil { - return "", err - } - - thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) - - // The jti (JWT ID) claim provides a unique identifier for the JWT. - jti := make([]byte, 20) - _, err = rand.Read(jti) - if err != nil { - return "", err - } - - token := jwt.New(jwt.SigningMethodRS256) - token.Header["x5t"] = thumbprint - x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} - token.Header["x5c"] = x5c - token.Claims = jwt.MapClaims{ - "aud": spt.inner.OauthConfig.TokenEndpoint.String(), - "iss": spt.inner.ClientID, - "sub": spt.inner.ClientID, - "jti": base64.URLEncoding.EncodeToString(jti), - "nbf": time.Now().Unix(), - "exp": time.Now().Add(time.Hour * 24).Unix(), - } - - signedString, err := token.SignedString(secret.PrivateKey) - return signedString, err -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. -func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - jwt, err := secret.SignJwt(spt) - if err != nil { - return err - } - - v.Set("client_assertion", jwt) - v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") -} - -// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. -type ServicePrincipalMSISecret struct { -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - return nil -} - -// MarshalJSON implements the json.Marshaler interface. 
-func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { - return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") -} - -// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. -type ServicePrincipalUsernamePasswordSecret struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("username", secret.Username) - v.Set("password", secret.Password) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Username string `json:"username"` - Password string `json:"password"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalUsernamePasswordSecret", - Username: secret.Username, - Password: secret.Password, - }) -} - -// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. -type ServicePrincipalAuthorizationCodeSecret struct { - ClientSecret string `json:"value"` - AuthorizationCode string `json:"authCode"` - RedirectURI string `json:"redirect"` -} - -// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. -func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { - v.Set("code", secret.AuthorizationCode) - v.Set("client_secret", secret.ClientSecret) - v.Set("redirect_uri", secret.RedirectURI) - return nil -} - -// MarshalJSON implements the json.Marshaler interface. -func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { - type tokenType struct { - Type string `json:"type"` - Value string `json:"value"` - AuthCode string `json:"authCode"` - Redirect string `json:"redirect"` - } - return json.Marshal(tokenType{ - Type: "ServicePrincipalAuthorizationCodeSecret", - Value: secret.ClientSecret, - AuthCode: secret.AuthorizationCode, - Redirect: secret.RedirectURI, - }) -} - -// ServicePrincipalToken encapsulates a Token created for a Service Principal. -type ServicePrincipalToken struct { - inner servicePrincipalToken - refreshLock *sync.RWMutex - sender Sender - refreshCallbacks []TokenRefreshCallback - // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. - MaxMSIRefreshAttempts int -} - -// MarshalTokenJSON returns the marshalled inner token. -func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { - return json.Marshal(spt.inner.Token) -} - -// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. -func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { - spt.refreshCallbacks = callbacks -} - -// MarshalJSON implements the json.Marshaler interface. -func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { - return json.Marshal(spt.inner) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. 
-func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { - // need to determine the token type - raw := map[string]interface{}{} - err := json.Unmarshal(data, &raw) - if err != nil { - return err - } - secret := raw["secret"].(map[string]interface{}) - switch secret["type"] { - case "ServicePrincipalNoSecret": - spt.inner.Secret = &ServicePrincipalNoSecret{} - case "ServicePrincipalTokenSecret": - spt.inner.Secret = &ServicePrincipalTokenSecret{} - case "ServicePrincipalCertificateSecret": - return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") - case "ServicePrincipalMSISecret": - return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") - case "ServicePrincipalUsernamePasswordSecret": - spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} - case "ServicePrincipalAuthorizationCodeSecret": - spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} - default: - return fmt.Errorf("unrecognized token type '%s'", secret["type"]) - } - err = json.Unmarshal(data, &spt.inner) - if err != nil { - return err - } - // Don't override the refreshLock or the sender if those have been already set. - if spt.refreshLock == nil { - spt.refreshLock = &sync.RWMutex{} - } - if spt.sender == nil { - spt.sender = &http.Client{Transport: tracing.Transport} - } - return nil -} - -// internal type used for marshalling/unmarshalling -type servicePrincipalToken struct { - Token Token `json:"token"` - Secret ServicePrincipalSecret `json:"secret"` - OauthConfig OAuthConfig `json:"oauth"` - ClientID string `json:"clientID"` - Resource string `json:"resource"` - AutoRefresh bool `json:"autoRefresh"` - RefreshWithin time.Duration `json:"refreshWithin"` -} - -func validateOAuthConfig(oac OAuthConfig) error { - if oac.IsZero() { - return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") - } - return nil -} - -// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. 
-func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(id, "id"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: oauthConfig, - Secret: secret, - ClientID: id, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: &http.Client{Transport: tracing.Transport}, - refreshCallbacks: callbacks, - } - return spt, nil -} - -// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token -func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalNoSecret{}, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret -func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if secret == nil { - return nil, fmt.Errorf("parameter 'secret' cannot be nil") - } - if token.IsZero() { - return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") - } - spt, err := NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - secret, - callbacks...) - if err != nil { - return nil, err - } - - spt.inner.Token = token - - return spt, nil -} - -// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal -// credentials scoped to the named resource. 
-func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(secret, "secret"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalTokenSecret{ - ClientSecret: secret, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. -func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if certificate == nil { - return nil, fmt.Errorf("parameter 'certificate' cannot be nil") - } - if privateKey == nil { - return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalCertificateSecret{ - PrivateKey: privateKey, - Certificate: certificate, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
-func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(username, "username"); err != nil { - return nil, err - } - if err := validateStringParam(password, "password"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalUsernamePasswordSecret{ - Username: username, - Password: password, - }, - callbacks..., - ) -} - -// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the -func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - - if err := validateOAuthConfig(oauthConfig); err != nil { - return nil, err - } - if err := validateStringParam(clientID, "clientID"); err != nil { - return nil, err - } - if err := validateStringParam(clientSecret, "clientSecret"); err != nil { - return nil, err - } - if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { - return nil, err - } - if err := validateStringParam(redirectURI, "redirectURI"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - - return NewServicePrincipalTokenWithSecret( - oauthConfig, - clientID, - resource, - &ServicePrincipalAuthorizationCodeSecret{ - ClientSecret: clientSecret, - AuthorizationCode: authorizationCode, - RedirectURI: redirectURI, - }, - callbacks..., - ) -} - -// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. -func GetMSIVMEndpoint() (string, error) { - return msiEndpoint, nil -} - -// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the system assigned identity when creating the token. -func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...) -} - -// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. -// It will use the specified user assigned identity when creating the token. -func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) 
-} - -func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { - if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil { - return nil, err - } - if err := validateStringParam(resource, "resource"); err != nil { - return nil, err - } - if userAssignedID != nil { - if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil { - return nil, err - } - } - // We set the oauth config token endpoint to be MSI's endpoint - msiEndpointURL, err := url.Parse(msiEndpoint) - if err != nil { - return nil, err - } - - v := url.Values{} - v.Set("resource", resource) - v.Set("api-version", "2018-02-01") - if userAssignedID != nil { - v.Set("client_id", *userAssignedID) - } - msiEndpointURL.RawQuery = v.Encode() - - spt := &ServicePrincipalToken{ - inner: servicePrincipalToken{ - Token: newToken(), - OauthConfig: OAuthConfig{ - TokenEndpoint: *msiEndpointURL, - }, - Secret: &ServicePrincipalMSISecret{}, - Resource: resource, - AutoRefresh: true, - RefreshWithin: defaultRefresh, - }, - refreshLock: &sync.RWMutex{}, - sender: &http.Client{Transport: tracing.Transport}, - refreshCallbacks: callbacks, - MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, - } - - if userAssignedID != nil { - spt.inner.ClientID = *userAssignedID - } - - return spt, nil -} - -// internal type that implements TokenRefreshError -type tokenRefreshError struct { - message string - resp *http.Response -} - -// Error implements the error interface which is part of the TokenRefreshError interface. -func (tre tokenRefreshError) Error() string { - return tre.message -} - -// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. -func (tre tokenRefreshError) Response() *http.Response { - return tre.resp -} - -func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { - return tokenRefreshError{message: message, resp: resp} -} - -// EnsureFresh will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFresh() error { - return spt.EnsureFreshWithContext(context.Background()) -} - -// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by -// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. -func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { - if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - // take the write lock then check to see if the token was already refreshed - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { - return spt.refreshInternal(ctx, spt.inner.Resource) - } - } - return nil -} - -// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization -func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { - if spt.refreshCallbacks != nil { - for _, callback := range spt.refreshCallbacks { - err := callback(spt.inner.Token) - if err != nil { - return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) - } - } - } - return nil -} - -// Refresh obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be syncrhonized. 
-func (spt *ServicePrincipalToken) Refresh() error { - return spt.RefreshWithContext(context.Background()) -} - -// RefreshWithContext obtains a fresh token for the Service Principal. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, spt.inner.Resource) -} - -// RefreshExchange refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { - return spt.RefreshExchangeWithContext(context.Background(), resource) -} - -// RefreshExchangeWithContext refreshes the token, but for a different resource. -// This method is not safe for concurrent use and should be syncrhonized. -func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { - spt.refreshLock.Lock() - defer spt.refreshLock.Unlock() - return spt.refreshInternal(ctx, resource) -} - -func (spt *ServicePrincipalToken) getGrantType() string { - switch spt.inner.Secret.(type) { - case *ServicePrincipalUsernamePasswordSecret: - return OAuthGrantTypeUserPass - case *ServicePrincipalAuthorizationCodeSecret: - return OAuthGrantTypeAuthorizationCode - default: - return OAuthGrantTypeClientCredentials - } -} - -func isIMDS(u url.URL) bool { - imds, err := url.Parse(msiEndpoint) - if err != nil { - return false - } - return u.Host == imds.Host && u.Path == imds.Path -} - -func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { - req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) - if err != nil { - return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err) - } - req.Header.Add("User-Agent", UserAgent()) - req = req.WithContext(ctx) - if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - v := url.Values{} - v.Set("client_id", spt.inner.ClientID) - v.Set("resource", resource) - - if spt.inner.Token.RefreshToken != "" { - v.Set("grant_type", OAuthGrantTypeRefreshToken) - v.Set("refresh_token", spt.inner.Token.RefreshToken) - // web apps must specify client_secret when refreshing tokens - // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens - if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - } else { - v.Set("grant_type", spt.getGrantType()) - err := spt.inner.Secret.SetAuthenticationValues(spt, &v) - if err != nil { - return err - } - } - - s := v.Encode() - body := ioutil.NopCloser(strings.NewReader(s)) - req.ContentLength = int64(len(s)) - req.Header.Set(contentType, mimeTypeFormPost) - req.Body = body - } - - if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { - req.Method = http.MethodGet - req.Header.Set(metadataHeader, "true") - } - - var resp *http.Response - if isIMDS(spt.inner.OauthConfig.TokenEndpoint) { - resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) - } else { - resp, err = spt.sender.Do(req) - } - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. 
Error = '%v'", err), nil) - } - - defer resp.Body.Close() - rb, err := ioutil.ReadAll(resp.Body) - - if resp.StatusCode != http.StatusOK { - if err != nil { - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) - } - return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) - } - - // for the following error cases don't return a TokenRefreshError. the operation succeeded - // but some transient failure happened during deserialization. by returning a generic error - // the retry logic will kick in (we don't retry on TokenRefreshError). - - if err != nil { - return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) - } - if len(strings.Trim(string(rb), " ")) == 0 { - return fmt.Errorf("adal: Empty service principal token received during refresh") - } - var token Token - err = json.Unmarshal(rb, &token) - if err != nil { - return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) - } - - spt.inner.Token = token - - return spt.InvokeRefreshCallbacks(token) -} - -// retry logic specific to retrieving a token from the IMDS endpoint -func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { - // copied from client.go due to circular dependency - retries := []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } - // extra retry status codes specific to IMDS - retries = append(retries, - http.StatusNotFound, - http.StatusGone, - // all remaining 5xx - http.StatusNotImplemented, - http.StatusHTTPVersionNotSupported, - http.StatusVariantAlsoNegotiates, - http.StatusInsufficientStorage, - http.StatusLoopDetected, - http.StatusNotExtended, - http.StatusNetworkAuthenticationRequired) - - // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance - - const maxDelay time.Duration = 60 * time.Second - - attempt := 0 - delay := time.Duration(0) - - for attempt < maxAttempts { - resp, err = sender.Do(req) - // retry on temporary network errors, e.g. transient network failures. - // if we don't receive a response then assume we can't connect to the - // endpoint so we're likely not running on an Azure VM so don't retry. - if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { - return - } - - // perform exponential backoff with a cap. - // must increment attempt before calculating delay. - attempt++ - // the base value of 2 is the "delta backoff" as specified in the guidance doc - delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) - if delay > maxDelay { - delay = maxDelay - } - - select { - case <-time.After(delay): - // intentionally left blank - case <-req.Context().Done(): - err = req.Context().Err() - return - } - } - return -} - -// returns true if the specified error is a temporary network error or false if it's not. -// if the error doesn't implement the net.Error interface the return value is true. 
-func isTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} - -// returns true if slice ints contains the value n -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -// SetAutoRefresh enables or disables automatic refreshing of stale tokens. -func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { - spt.inner.AutoRefresh = autoRefresh -} - -// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will -// refresh the token. -func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { - spt.inner.RefreshWithin = d - return -} - -// SetSender sets the http.Client used when obtaining the Service Principal token. An -// undecorated http.Client is used by default. -func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } - -// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. -func (spt *ServicePrincipalToken) OAuthToken() string { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token.OAuthToken() -} - -// Token returns a copy of the current token. -func (spt *ServicePrincipalToken) Token() Token { - spt.refreshLock.RLock() - defer spt.refreshLock.RUnlock() - return spt.inner.Token -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/version.go deleted file mode 100644 index c867b34..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/adal/version.go +++ /dev/null @@ -1,45 +0,0 @@ -package adal - -import ( - "fmt" - "runtime" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -const number = "v1.0.0" - -var ( - ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. -func UserAgent() string { - return ua -} - -// AddToUserAgent adds an extension to the current user agent -func AddToUserAgent(extension string) error { - if extension != "" { - ua = fmt.Sprintf("%s %s", ua, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/authorization.go deleted file mode 100644 index 2e24b4b..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ /dev/null @@ -1,287 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/base64" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/tracing" -) - -const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" - apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" - bingAPISdkHeader = "X-BingApis-SDK-Client" - golangBingAPISdkHeaderValue = "Go-SDK" - authorization = "Authorization" - basic = "Basic" -) - -// Authorizer is the interface that provides a PrepareDecorator used to supply request -// authorization. Most often, the Authorizer decorator runs last so it has access to the full -// state of the formed HTTP request. -type Authorizer interface { - WithAuthorization() PrepareDecorator -} - -// NullAuthorizer implements a default, "do nothing" Authorizer. -type NullAuthorizer struct{} - -// WithAuthorization returns a PrepareDecorator that does nothing. -func (na NullAuthorizer) WithAuthorization() PrepareDecorator { - return WithNothing() -} - -// APIKeyAuthorizer implements API Key authorization. -type APIKeyAuthorizer struct { - headers map[string]interface{} - queryParameters map[string]interface{} -} - -// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(headers, nil) -} - -// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. -func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { - return NewAPIKeyAuthorizer(nil, queryParameters) -} - -// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers. -func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { - return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. -func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) - } -} - -// CognitiveServicesAuthorizer implements authorization for Cognitive Services. 
-type CognitiveServicesAuthorizer struct { - subscriptionKey string -} - -// NewCognitiveServicesAuthorizer is -func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { - return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} -} - -// WithAuthorization is -func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[apiKeyAuthorizerHeader] = csa.subscriptionKey - headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BearerAuthorizer implements the bearer authorization -type BearerAuthorizer struct { - tokenProvider adal.OAuthTokenProvider -} - -// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider -func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { - return &BearerAuthorizer{tokenProvider: tp} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the token. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // the ordering is important here, prefer RefresherWithContext if available - if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { - err = refresher.EnsureFreshWithContext(r.Context()) - } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { - err = refresher.EnsureFresh() - } - if err != nil { - var resp *http.Response - if tokError, ok := err.(adal.TokenRefreshError); ok { - resp = tokError.Response() - } - return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, - "Failed to refresh the Token for request to %s", r.URL) - } - return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) - } - return r, err - }) - } -} - -// BearerAuthorizerCallbackFunc is the authentication callback signature. -type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) - -// BearerAuthorizerCallback implements bearer authorization via a callback. -type BearerAuthorizerCallback struct { - sender Sender - callback BearerAuthorizerCallbackFunc -} - -// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback -// is invoked when the HTTP request is submitted. -func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { - if sender == nil { - sender = &http.Client{Transport: tracing.Transport} - } - return &BearerAuthorizerCallback{sender: sender, callback: callback} -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value -// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. -// -// By default, the token will be automatically refreshed through the Refresher interface. -func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - // make a copy of the request and remove the body as it's not - // required and avoids us having to create a copy of it. 
- rCopy := *r - removeRequestBody(&rCopy) - - resp, err := bacb.sender.Do(&rCopy) - if err == nil && resp.StatusCode == 401 { - defer resp.Body.Close() - if hasBearerChallenge(resp) { - bc, err := newBearerChallenge(resp) - if err != nil { - return r, err - } - if bacb.callback != nil { - ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) - if err != nil { - return r, err - } - return Prepare(r, ba.WithAuthorization()) - } - } - } - } - return r, err - }) - } -} - -// returns true if the HTTP response contains a bearer challenge -func hasBearerChallenge(resp *http.Response) bool { - authHeader := resp.Header.Get(bearerChallengeHeader) - if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { - return false - } - return true -} - -type bearerChallenge struct { - values map[string]string -} - -func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { - challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) - trimmedChallenge := challenge[len(bearer)+1:] - - // challenge is a set of key=value pairs that are comma delimited - pairs := strings.Split(trimmedChallenge, ",") - if len(pairs) < 1 { - err = fmt.Errorf("challenge '%s' contains no pairs", challenge) - return bc, err - } - - bc.values = make(map[string]string) - for i := range pairs { - trimmedPair := strings.TrimSpace(pairs[i]) - pair := strings.Split(trimmedPair, "=") - if len(pair) == 2 { - // remove the enclosing quotes - key := strings.Trim(pair[0], "\"") - value := strings.Trim(pair[1], "\"") - - switch key { - case "authorization", "authorization_uri": - // strip the tenant ID from the authorization URL - asURL, err := url.Parse(value) - if err != nil { - return bc, err - } - bc.values[tenantID] = asURL.Path[1:] - default: - bc.values[key] = value - } - } - } - - return bc, err -} - -// EventGridKeyAuthorizer implements authorization for event grid using key authentication. -type EventGridKeyAuthorizer struct { - topicKey string -} - -// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer -// with the specified topic key. -func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { - return EventGridKeyAuthorizer{topicKey: topicKey} -} - -// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. -func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { - headers := map[string]interface{}{ - "aeg-sas-key": egta.topicKey, - } - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} - -// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header -// with the value "Basic " where is a base64-encoded username:password tuple. -type BasicAuthorizer struct { - userName string - password string -} - -// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. -func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { - return &BasicAuthorizer{ - userName: userName, - password: password, - } -} - -// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Basic " followed by the base64-encoded username:password tuple. 
-func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { - headers := make(map[string]interface{}) - headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) - - return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/autorest.go deleted file mode 100644 index aafdf02..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/autorest.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines -and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) -generated Go code. - -The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, -and Responding. A typical pattern is: - - req, err := Prepare(&http.Request{}, - token.WithAuthorization()) - - resp, err := Send(req, - WithLogging(logger), - DoErrorIfStatusCode(http.StatusInternalServerError), - DoCloseIfError(), - DoRetryForAttempts(5, time.Second)) - - err = Respond(resp, - ByDiscardingBody(), - ByClosing()) - -Each phase relies on decorators to modify and / or manage processing. Decorators may first modify -and then pass the data along, pass the data first and then modify the result, or wrap themselves -around passing the data (such as a logger might do). Decorators run in the order provided. For -example, the following: - - req, err := Prepare(&http.Request{}, - WithBaseURL("https://microsoft.com/"), - WithPath("a"), - WithPath("b"), - WithPath("c")) - -will set the URL to: - - https://microsoft.com/a/b/c - -Preparers and Responders may be shared and re-used (assuming the underlying decorators support -sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders -shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, -all bound together by means of input / output channels. - -Decorators hold their passed state within a closure (such as the path components in the example -above). Be careful to share Preparers and Responders only in a context where such held state -applies. For example, it may not make sense to share a Preparer that applies a query string from a -fixed set of values. Similarly, sharing a Responder that reads the response body into a passed -struct (e.g., ByUnmarshallingJson) is likely incorrect. - -Lastly, the Swagger specification (https://swagger.io) that drives AutoRest -(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The -github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure -correct parsing and formatting. - -Errors raised by autorest objects and methods will conform to the autorest.Error interface. - -See the included examples for more detail. For details on the suggested use of this package by -generated clients, see the Client described below. -*/ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "net/http" - "time" -) - -const ( - // HeaderLocation specifies the HTTP Location header. - HeaderLocation = "Location" - - // HeaderRetryAfter specifies the HTTP Retry-After header. - HeaderRetryAfter = "Retry-After" -) - -// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set -// and false otherwise. -func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { - if resp == nil { - return false - } - return containsInt(codes, resp.StatusCode) -} - -// GetLocation retrieves the URL from the Location header of the passed response. -func GetLocation(resp *http.Response) string { - return resp.Header.Get(HeaderLocation) -} - -// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If -// the header is absent or is malformed, it will return the supplied default delay time.Duration. -func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { - retry := resp.Header.Get(HeaderRetryAfter) - if retry == "" { - return defaultDelay - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - return defaultDelay - } - - return d -} - -// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. -func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare(&http.Request{Cancel: cancel}, - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} - -// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. -func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { - location := GetLocation(resp) - if location == "" { - return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") - } - - req, err := Prepare((&http.Request{}).WithContext(ctx), - AsGet(), - WithBaseURL(location)) - if err != nil { - return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) - } - - return req, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/async.go deleted file mode 100644 index 0041eac..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ /dev/null @@ -1,992 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/tracing" -) - -const ( - headerAsyncOperation = "Azure-AsyncOperation" -) - -const ( - operationInProgress string = "InProgress" - operationCanceled string = "Canceled" - operationFailed string = "Failed" - operationSucceeded string = "Succeeded" -) - -var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} - -// Future provides a mechanism to access the status and results of an asynchronous request. -// Since futures are stateful they should be passed by value to avoid race conditions. -type Future struct { - req *http.Request // legacy - pt pollingTracker -} - -// NewFuture returns a new Future object initialized with the specified request. -// Deprecated: Please use NewFutureFromResponse instead. -func NewFuture(req *http.Request) Future { - return Future{req: req} -} - -// NewFutureFromResponse returns a new Future object initialized -// with the initial response from an asynchronous operation. -func NewFutureFromResponse(resp *http.Response) (Future, error) { - pt, err := createPollingTracker(resp) - return Future{pt: pt}, err -} - -// Response returns the last HTTP response. -func (f Future) Response() *http.Response { - if f.pt == nil { - return nil - } - return f.pt.latestResponse() -} - -// Status returns the last status message of the operation. -func (f Future) Status() string { - if f.pt == nil { - return "" - } - return f.pt.pollingStatus() -} - -// PollingMethod returns the method used to monitor the status of the asynchronous operation. -func (f Future) PollingMethod() PollingMethodType { - if f.pt == nil { - return PollingUnknown - } - return f.pt.pollingMethod() -} - -// Done queries the service to see if the operation has completed. -// Deprecated: Use DoneWithContext() -func (f *Future) Done(sender autorest.Sender) (bool, error) { - return f.DoneWithContext(context.Background(), sender) -} - -// DoneWithContext queries the service to see if the operation has completed. 
-func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - - // support for legacy Future implementation - if f.req != nil { - resp, err := sender.Do(f.req) - if err != nil { - return false, err - } - pt, err := createPollingTracker(resp) - if err != nil { - return false, err - } - f.pt = pt - f.req = nil - } - // end legacy - if f.pt == nil { - return false, autorest.NewError("Future", "Done", "future is not initialized") - } - if f.pt.hasTerminated() { - return true, f.pt.pollingError() - } - if err := f.pt.pollForStatus(ctx, sender); err != nil { - return false, err - } - if err := f.pt.checkForErrors(); err != nil { - return f.pt.hasTerminated(), err - } - if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { - return false, err - } - if err := f.pt.initPollingMethod(); err != nil { - return false, err - } - if err := f.pt.updatePollingMethod(); err != nil { - return false, err - } - return f.pt.hasTerminated(), f.pt.pollingError() -} - -// GetPollingDelay returns a duration the application should wait before checking -// the status of the asynchronous request and true; this value is returned from -// the service via the Retry-After response header. If the header wasn't returned -// then the function returns the zero-value time.Duration and false. -func (f Future) GetPollingDelay() (time.Duration, bool) { - if f.pt == nil { - return 0, false - } - resp := f.pt.latestResponse() - if resp == nil { - return 0, false - } - - retry := resp.Header.Get(autorest.HeaderRetryAfter) - if retry == "" { - return 0, false - } - - d, err := time.ParseDuration(retry + "s") - if err != nil { - panic(err) - } - - return d, true -} - -// WaitForCompletion will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// Deprecated: Please use WaitForCompletionRef() instead. -func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { - return f.WaitForCompletionRef(ctx, client) -} - -// WaitForCompletionRef will return when one of the following conditions is met: the long -// running operation has completed, the provided context is cancelled, or the client's -// polling duration has been exceeded. It will retry failed polling attempts based on -// the retry value defined in the client up to the maximum retry attempts. -// If no deadline is specified in the context then the client.PollingDuration will be -// used to determine if a default deadline should be used. -// If PollingDuration is greater than zero the value will be used as the context's timeout. -// If PollingDuration is zero then no default deadline will be used. 
-func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { - ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") - defer func() { - sc := -1 - resp := f.Response() - if resp != nil { - sc = resp.StatusCode - } - tracing.EndSpan(ctx, sc, err) - }() - cancelCtx := ctx - // if the provided context already has a deadline don't override it - _, hasDeadline := ctx.Deadline() - if d := client.PollingDuration; !hasDeadline && d != 0 { - var cancel context.CancelFunc - cancelCtx, cancel = context.WithTimeout(ctx, d) - defer cancel() - } - - done, err := f.DoneWithContext(ctx, client) - for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { - if attempts >= client.RetryAttempts { - return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") - } - // we want delayAttempt to be zero in the non-error case so - // that DelayForBackoff doesn't perform exponential back-off - var delayAttempt int - var delay time.Duration - if err == nil { - // check for Retry-After delay, if not present use the client's polling delay - var ok bool - delay, ok = f.GetPollingDelay() - if !ok { - delay = client.PollingDelay - } - } else { - // there was an error polling for status so perform exponential - // back-off based on the number of attempts using the client's retry - // duration. update attempts after delayAttempt to avoid off-by-one. - delayAttempt = attempts - delay = client.RetryDuration - attempts++ - } - // wait until the delay elapses or the context is cancelled - delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) - if !delayElapsed { - return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") - } - } - return -} - -// MarshalJSON implements the json.Marshaler interface. -func (f Future) MarshalJSON() ([]byte, error) { - return json.Marshal(f.pt) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (f *Future) UnmarshalJSON(data []byte) error { - // unmarshal into JSON object to determine the tracker type - obj := map[string]interface{}{} - err := json.Unmarshal(data, &obj) - if err != nil { - return err - } - if obj["method"] == nil { - return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") - } - method := obj["method"].(string) - switch strings.ToUpper(method) { - case http.MethodDelete: - f.pt = &pollingTrackerDelete{} - case http.MethodPatch: - f.pt = &pollingTrackerPatch{} - case http.MethodPost: - f.pt = &pollingTrackerPost{} - case http.MethodPut: - f.pt = &pollingTrackerPut{} - default: - return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method) - } - // now unmarshal into the tracker - return json.Unmarshal(data, &f.pt) -} - -// PollingURL returns the URL used for retrieving the status of the long-running operation. -func (f Future) PollingURL() string { - if f.pt == nil { - return "" - } - return f.pt.pollingURL() -} - -// GetResult should be called once polling has completed successfully. -// It makes the final GET call to retrieve the resultant payload. -func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { - if f.pt.finalGetURL() == "" { - // we can end up in this situation if the async operation returns a 200 - // with no polling URLs. 
in that case return the response which should - // contain the JSON payload (only do this for successful terminal cases). - if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { - return lr, nil - } - return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") - } - req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) - if err != nil { - return nil, err - } - return sender.Do(req) -} - -type pollingTracker interface { - // these methods can differ per tracker - - // checks the response headers and status code to determine the polling mechanism - updatePollingMethod() error - - // checks the response for tracker-specific error conditions - checkForErrors() error - - // returns true if provisioning state should be checked - provisioningStateApplicable() bool - - // methods common to all trackers - - // initializes a tracker's polling URL and method, called for each iteration. - // these values can be overridden by each polling tracker as required. - initPollingMethod() error - - // initializes the tracker's internal state, call this when the tracker is created - initializeState() error - - // makes an HTTP request to check the status of the LRO - pollForStatus(ctx context.Context, sender autorest.Sender) error - - // updates internal tracker state, call this after each call to pollForStatus - updatePollingState(provStateApl bool) error - - // returns the error response from the service, can be nil - pollingError() error - - // returns the polling method being used - pollingMethod() PollingMethodType - - // returns the state of the LRO as returned from the service - pollingStatus() string - - // returns the URL used for polling status - pollingURL() string - - // returns the URL used for the final GET to retrieve the resource - finalGetURL() string - - // returns true if the LRO is in a terminal state - hasTerminated() bool - - // returns true if the LRO is in a failed terminal state - hasFailed() bool - - // returns true if the LRO is in a successful terminal state - hasSucceeded() bool - - // returns the cached HTTP response after a call to pollForStatus(), can be nil - latestResponse() *http.Response -} - -type pollingTrackerBase struct { - // resp is the last response, either from the submission of the LRO or from polling - resp *http.Response - - // method is the HTTP verb, this is needed for deserialization - Method string `json:"method"` - - // rawBody is the raw JSON response body - rawBody map[string]interface{} - - // denotes if polling is using async-operation or location header - Pm PollingMethodType `json:"pollingMethod"` - - // the URL to poll for status - URI string `json:"pollingURI"` - - // the state of the LRO as returned from the service - State string `json:"lroState"` - - // the URL to GET for the final result - FinalGetURI string `json:"resultURI"` - - // used to hold an error object returned from the service - Err *ServiceError `json:"error,omitempty"` -} - -func (pt *pollingTrackerBase) initializeState() error { - // determine the initial polling state based on response body and/or HTTP status - // code. this is applicable to the initial LRO response, not polling responses! 
- pt.Method = pt.resp.Request.Method - if err := pt.updateRawBody(); err != nil { - return err - } - switch pt.resp.StatusCode { - case http.StatusOK: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - if pt.hasFailed() { - pt.updateErrorFromResponse() - return pt.pollingError() - } - } else { - pt.State = operationSucceeded - } - case http.StatusCreated: - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationInProgress - } - case http.StatusAccepted: - pt.State = operationInProgress - case http.StatusNoContent: - pt.State = operationSucceeded - default: - pt.State = operationFailed - pt.updateErrorFromResponse() - return pt.pollingError() - } - return pt.initPollingMethod() -} - -func (pt pollingTrackerBase) getProvisioningState() *string { - if pt.rawBody != nil && pt.rawBody["properties"] != nil { - p := pt.rawBody["properties"].(map[string]interface{}) - if ps := p["provisioningState"]; ps != nil { - s := ps.(string) - return &s - } - } - return nil -} - -func (pt *pollingTrackerBase) updateRawBody() error { - pt.rawBody = map[string]interface{}{} - if pt.resp.ContentLength != 0 { - defer pt.resp.Body.Close() - b, err := ioutil.ReadAll(pt.resp.Body) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") - } - // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty - if len(b) == 0 { - return nil - } - // put the body back so it's available to other callers - pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - if err = json.Unmarshal(b, &pt.rawBody); err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") - } - } - return nil -} - -func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { - req, err := http.NewRequest(http.MethodGet, pt.URI, nil) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") - } - - req = req.WithContext(ctx) - pt.resp, err = sender.Do(req) - if err != nil { - return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") - } - if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { - // reset the service error on success case - pt.Err = nil - err = pt.updateRawBody() - } else { - // check response body for error content - pt.updateErrorFromResponse() - err = pt.pollingError() - } - return err -} - -// attempts to unmarshal a ServiceError type from the response body. -// if that fails then make a best attempt at creating something meaningful. -// NOTE: this assumes that the async operation has failed. -func (pt *pollingTrackerBase) updateErrorFromResponse() { - var err error - if pt.resp.ContentLength != 0 { - type respErr struct { - ServiceError *ServiceError `json:"error"` - } - re := respErr{} - defer pt.resp.Body.Close() - var b []byte - if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { - goto Default - } - if err = json.Unmarshal(b, &re); err != nil { - goto Default - } - // unmarshalling the error didn't yield anything, try unwrapped error - if re.ServiceError == nil { - err = json.Unmarshal(b, &re.ServiceError) - if err != nil { - goto Default - } - } - // the unmarshaller will ensure re.ServiceError is non-nil - // even if there was no content unmarshalled so check the code. 
- if re.ServiceError.Code != "" { - pt.Err = re.ServiceError - return - } - } -Default: - se := &ServiceError{ - Code: pt.pollingStatus(), - Message: "The async operation failed.", - } - if err != nil { - se.InnerError = make(map[string]interface{}) - se.InnerError["unmarshalError"] = err.Error() - } - // stick the response body into the error object in hopes - // it contains something useful to help diagnose the failure. - if len(pt.rawBody) > 0 { - se.AdditionalInfo = []map[string]interface{}{ - pt.rawBody, - } - } - pt.Err = se -} - -func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { - if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { - pt.State = pt.rawBody["status"].(string) - } else { - if pt.resp.StatusCode == http.StatusAccepted { - pt.State = operationInProgress - } else if provStateApl { - if ps := pt.getProvisioningState(); ps != nil { - pt.State = *ps - } else { - pt.State = operationSucceeded - } - } else { - return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") - } - } - // if the operation has failed update the error state - if pt.hasFailed() { - pt.updateErrorFromResponse() - } - return nil -} - -func (pt pollingTrackerBase) pollingError() error { - if pt.Err == nil { - return nil - } - return pt.Err -} - -func (pt pollingTrackerBase) pollingMethod() PollingMethodType { - return pt.Pm -} - -func (pt pollingTrackerBase) pollingStatus() string { - return pt.State -} - -func (pt pollingTrackerBase) pollingURL() string { - return pt.URI -} - -func (pt pollingTrackerBase) finalGetURL() string { - return pt.FinalGetURI -} - -func (pt pollingTrackerBase) hasTerminated() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) hasFailed() bool { - return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) -} - -func (pt pollingTrackerBase) hasSucceeded() bool { - return strings.EqualFold(pt.State, operationSucceeded) -} - -func (pt pollingTrackerBase) latestResponse() *http.Response { - return pt.resp -} - -// error checking common to all trackers -func (pt pollingTrackerBase) baseCheckForErrors() error { - // for Azure-AsyncOperations the response body cannot be nil or empty - if pt.Pm == PollingAsyncOperation { - if pt.resp.Body == nil || pt.resp.ContentLength == 0 { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") - } - if pt.rawBody["status"] == nil { - return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") - } - } - return nil -} - -// default initialization of polling URL/method. each verb tracker will update this as required. 
-func (pt *pollingTrackerBase) initPollingMethod() error { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - return nil - } - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh != "" { - pt.URI = lh - pt.Pm = PollingLocation - return nil - } - // it's ok if we didn't find a polling header, this will be handled elsewhere - return nil -} - -// DELETE - -type pollingTrackerDelete struct { - pollingTrackerBase -} - -func (pt *pollingTrackerDelete) updatePollingMethod() error { - // for 201 the Location header is required - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - } - pt.Pm = PollingLocation - pt.FinalGetURI = pt.URI - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerDelete) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerDelete) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PATCH - -type pollingTrackerPatch struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPatch) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - // note the absence of the "final GET" mechanism for PATCH - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - if ao == "" { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } else { - pt.URI = lh - pt.Pm = PollingLocation - } - } - } - return nil -} - -func (pt 
pollingTrackerPatch) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPatch) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// POST - -type pollingTrackerPost struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPost) updatePollingMethod() error { - // 201 requires Location header - if pt.resp.StatusCode == http.StatusCreated { - if lh, err := getURLFromLocationHeader(pt.resp); err != nil { - return err - } else if lh == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") - } else { - pt.URI = lh - pt.FinalGetURI = lh - pt.Pm = PollingLocation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. - if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - // when both headers are returned we use the value in the Location header for the final GET - pt.FinalGetURI = lh - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPost) checkForErrors() error { - return pt.baseCheckForErrors() -} - -func (pt pollingTrackerPost) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent -} - -// PUT - -type pollingTrackerPut struct { - pollingTrackerBase -} - -func (pt *pollingTrackerPut) updatePollingMethod() error { - // by default we can use the original URL for polling and final GET - if pt.URI == "" { - pt.URI = pt.resp.Request.URL.String() - } - if pt.FinalGetURI == "" { - pt.FinalGetURI = pt.resp.Request.URL.String() - } - if pt.Pm == PollingUnknown { - pt.Pm = PollingRequestURI - } - // for 201 it's permissible for no headers to be returned - if pt.resp.StatusCode == http.StatusCreated { - if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - } - // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary - if pt.resp.StatusCode == http.StatusAccepted { - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } else if ao != "" { - pt.URI = ao - pt.Pm = PollingAsyncOperation - } - // if the Location header is invalid and we already have a polling URL - // then we don't care if the Location header URL is malformed. 
- if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { - return err - } else if lh != "" { - if ao == "" { - pt.URI = lh - pt.Pm = PollingLocation - } - } - // make sure a polling URL was found - if pt.URI == "" { - return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") - } - } - return nil -} - -func (pt pollingTrackerPut) checkForErrors() error { - err := pt.baseCheckForErrors() - if err != nil { - return err - } - // if there are no LRO headers then the body cannot be empty - ao, err := getURLFromAsyncOpHeader(pt.resp) - if err != nil { - return err - } - lh, err := getURLFromLocationHeader(pt.resp) - if err != nil { - return err - } - if ao == "" && lh == "" && len(pt.rawBody) == 0 { - return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") - } - return nil -} - -func (pt pollingTrackerPut) provisioningStateApplicable() bool { - return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated -} - -// creates a polling tracker based on the verb of the original request -func createPollingTracker(resp *http.Response) (pollingTracker, error) { - var pt pollingTracker - switch strings.ToUpper(resp.Request.Method) { - case http.MethodDelete: - pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPatch: - pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPost: - pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} - case http.MethodPut: - pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} - default: - return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) - } - if err := pt.initializeState(); err != nil { - return pt, err - } - // this initializes the polling header values, we do this during creation in case the - // initial response send us invalid values; this way the API call will return a non-nil - // error (not doing this means the error shows up in Future.Done) - return pt, pt.updatePollingMethod() -} - -// gets the polling URL from the Azure-AsyncOperation header. -// ensures the URL is well-formed and absolute. -func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// gets the polling URL from the Location header. -// ensures the URL is well-formed and absolute. -func getURLFromLocationHeader(resp *http.Response) (string, error) { - s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) - if s == "" { - return "", nil - } - if !isValidURL(s) { - return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) - } - return s, nil -} - -// verify that the URL is valid and absolute -func isValidURL(s string) bool { - u, err := url.Parse(s) - return err == nil && u.IsAbs() -} - -// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure -// long-running operation. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via -// the context associated with the http.Request. 
-// Deprecated: Prefer using Futures to allow for non-blocking async operations. -func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - return resp, err - } - if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { - return resp, nil - } - future, err := NewFutureFromResponse(resp) - if err != nil { - return resp, err - } - // retry until either the LRO completes or we receive an error - var done bool - for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) { - // check for Retry-After delay, if not present use the specified polling delay - if pd, ok := future.GetPollingDelay(); ok { - delay = pd - } - // wait until the delay elapses or the context is cancelled - if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed { - return future.Response(), - autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled") - } - } - return future.Response(), err - }) - } -} - -// PollingMethodType defines a type used for enumerating polling mechanisms. -type PollingMethodType string - -const ( - // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. - PollingAsyncOperation PollingMethodType = "AsyncOperation" - - // PollingLocation indicates the polling method uses the Location header. - PollingLocation PollingMethodType = "Location" - - // PollingRequestURI indicates the polling method uses the original request URI. - PollingRequestURI PollingMethodType = "RequestURI" - - // PollingUnknown indicates an unknown polling method and is the default value. - PollingUnknown PollingMethodType = "" -) - -// AsyncOpIncompleteError is the type that's returned from a future that has not completed. -type AsyncOpIncompleteError struct { - // FutureType is the name of the type composed of a azure.Future. - FutureType string -} - -// Error returns an error message including the originating type name of the error. -func (e AsyncOpIncompleteError) Error() string { - return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) -} - -// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. -func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { - return AsyncOpIncompleteError{ - FutureType: futureType, - } -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go deleted file mode 100644 index 3a0a439..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go +++ /dev/null @@ -1,326 +0,0 @@ -// Package azure provides Azure-specific implementations used with AutoRest. -// See the included examples for more detail. -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "regexp" - "strconv" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -const ( - // HeaderClientID is the Azure extension header to set a user-specified request ID. - HeaderClientID = "x-ms-client-request-id" - - // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID - // should be included in the response. - HeaderReturnClientID = "x-ms-return-client-request-id" - - // HeaderRequestID is the Azure extension header of the service generated request ID returned - // in the response. - HeaderRequestID = "x-ms-request-id" -) - -// ServiceError encapsulates the error response from an Azure service. -// It adhears to the OData v4 specification for error responses. -type ServiceError struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` -} - -func (se ServiceError) Error() string { - result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) - - if se.Target != nil { - result += fmt.Sprintf(" Target=%q", *se.Target) - } - - if se.Details != nil { - d, err := json.Marshal(se.Details) - if err != nil { - result += fmt.Sprintf(" Details=%v", se.Details) - } - result += fmt.Sprintf(" Details=%v", string(d)) - } - - if se.InnerError != nil { - d, err := json.Marshal(se.InnerError) - if err != nil { - result += fmt.Sprintf(" InnerError=%v", se.InnerError) - } - result += fmt.Sprintf(" InnerError=%v", string(d)) - } - - if se.AdditionalInfo != nil { - d, err := json.Marshal(se.AdditionalInfo) - if err != nil { - result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) - } - result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) - } - - return result -} - -// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. -func (se *ServiceError) UnmarshalJSON(b []byte) error { - // per the OData v4 spec the details field must be an array of JSON objects. - // unfortunately not all services adhear to the spec and just return a single - // object instead of an array with one object. so we have to perform some - // shenanigans to accommodate both cases. 
- // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 - - type serviceError1 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details []map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` - } - - type serviceError2 struct { - Code string `json:"code"` - Message string `json:"message"` - Target *string `json:"target"` - Details map[string]interface{} `json:"details"` - InnerError map[string]interface{} `json:"innererror"` - AdditionalInfo []map[string]interface{} `json:"additionalInfo"` - } - - se1 := serviceError1{} - err := json.Unmarshal(b, &se1) - if err == nil { - se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) - return nil - } - - se2 := serviceError2{} - err = json.Unmarshal(b, &se2) - if err == nil { - se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) - se.Details = append(se.Details, se2.Details) - return nil - } - return err -} - -func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { - se.Code = code - se.Message = message - se.Target = target - se.Details = details - se.InnerError = inner - se.AdditionalInfo = additional -} - -// RequestError describes an error response returned by Azure service. -type RequestError struct { - autorest.DetailedError - - // The error returned by the Azure service. - ServiceError *ServiceError `json:"error"` - - // The request id (from the x-ms-request-id-header) of the request. - RequestID string -} - -// Error returns a human-friendly error message from service error. -func (e RequestError) Error() string { - return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", - e.StatusCode, e.ServiceError) -} - -// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. -func IsAzureError(e error) bool { - _, ok := e.(*RequestError) - return ok -} - -// Resource contains details about an Azure resource. -type Resource struct { - SubscriptionID string - ResourceGroup string - Provider string - ResourceType string - ResourceName string -} - -// ParseResourceID parses a resource ID into a ResourceDetails struct. -// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. -func ParseResourceID(resourceID string) (Resource, error) { - - const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` - resourceIDPattern := regexp.MustCompile(resourceIDPatternText) - match := resourceIDPattern.FindStringSubmatch(resourceID) - - if len(match) == 0 { - return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) - } - - v := strings.Split(match[5], "/") - resourceName := v[len(v)-1] - - result := Resource{ - SubscriptionID: match[1], - ResourceGroup: match[2], - Provider: match[3], - ResourceType: match[4], - ResourceName: resourceName, - } - - return result, nil -} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. 
-func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { - if v, ok := original.(*RequestError); ok { - return *v - } - - statusCode := autorest.UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - return RequestError{ - DetailedError: autorest.DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - }, - } -} - -// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id -// header to true such that UUID accompanies the http.Response. -func WithReturningClientID(uuid string) autorest.PrepareDecorator { - preparer := autorest.CreatePreparer( - WithClientID(uuid), - WithReturnClientID(true)) - - return func(p autorest.Preparer) autorest.Preparer { - return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err != nil { - return r, err - } - return preparer.Prepare(r) - }) - } -} - -// WithClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., -// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). -func WithClientID(uuid string) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderClientID, uuid) -} - -// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of -// x-ms-return-client-request-id whose boolean value indicates if the value of the -// x-ms-client-request-id header should be included in the http.Response. -func WithReturnClientID(b bool) autorest.PrepareDecorator { - return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) -} - -// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the -// http.Request sent to the service (and returned in the http.Response) -func ExtractClientID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderClientID, resp) -} - -// ExtractRequestID extracts the Azure server generated request identifier from the -// x-ms-request-id header. -func ExtractRequestID(resp *http.Response) string { - return autorest.ExtractHeaderValue(HeaderRequestID, resp) -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an -// azure.RequestError by reading the response body unless the response HTTP status code -// is among the set passed. -// -// If there is a chance service may return responses other than the Azure error -// format and the response cannot be parsed into an error, a decoding error will -// be returned containing the response body. In any case, the Responder will -// return an error if the status code is not satisfied. -// -// If this Responder returns an error, the response body will be replaced with -// an in-memory reader, which needs no further closing. -func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { - return func(r autorest.Responder) autorest.Responder { - return autorest.ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { - var e RequestError - defer resp.Body.Close() - - // Copy and replace the Body in case it does not contain an error object. 
- // This will leave the Body available to the caller. - b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) - resp.Body = ioutil.NopCloser(&b) - if decodeErr != nil { - return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) - } - if e.ServiceError == nil { - // Check if error is unwrapped ServiceError - if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { - return err - } - } - if e.ServiceError.Message == "" { - // if we're here it means the returned error wasn't OData v4 compliant. - // try to unmarshal the body as raw JSON in hopes of getting something. - rawBody := map[string]interface{}{} - if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { - return err - } - e.ServiceError = &ServiceError{ - Code: "Unknown", - Message: "Unknown service error", - } - if len(rawBody) > 0 { - e.ServiceError.Details = []map[string]interface{}{rawBody} - } - } - e.Response = resp - e.RequestID = ExtractRequestID(resp) - if e.StatusCode == nil { - e.StatusCode = resp.StatusCode - } - err = &e - } - return err - }) - } -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go deleted file mode 100644 index 85d3202..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ /dev/null @@ -1,196 +0,0 @@ -package azure - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" -) - -// EnvironmentFilepathName captures the name of the environment variable containing the path to the file -// to be used while populating the Azure Environment. -const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" - -var environments = map[string]Environment{ - "AZURECHINACLOUD": ChinaCloud, - "AZUREGERMANCLOUD": GermanCloud, - "AZUREPUBLICCLOUD": PublicCloud, - "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, -} - -// Environment represents a set of endpoints for each of Azure's Clouds. 
-type Environment struct { - Name string `json:"name"` - ManagementPortalURL string `json:"managementPortalURL"` - PublishSettingsURL string `json:"publishSettingsURL"` - ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` - ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` - ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` - GalleryEndpoint string `json:"galleryEndpoint"` - KeyVaultEndpoint string `json:"keyVaultEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - ServiceBusEndpoint string `json:"serviceBusEndpoint"` - BatchManagementEndpoint string `json:"batchManagementEndpoint"` - StorageEndpointSuffix string `json:"storageEndpointSuffix"` - SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` - TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` - KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` - ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` - ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` - ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` - ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` - CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` - TokenAudience string `json:"tokenAudience"` -} - -var ( - // PublicCloud is the default public Azure cloud environment - PublicCloud = Environment{ - Name: "AzurePublicCloud", - ManagementPortalURL: "https://manage.windowsazure.com/", - PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.windows.net/", - ResourceManagerEndpoint: "https://management.azure.com/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", - GalleryEndpoint: "https://gallery.azure.com/", - KeyVaultEndpoint: "https://vault.azure.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.windows.net/", - BatchManagementEndpoint: "https://batch.core.windows.net/", - StorageEndpointSuffix: "core.windows.net", - SQLDatabaseDNSSuffix: "database.windows.net", - TrafficManagerDNSSuffix: "trafficmanager.net", - KeyVaultDNSSuffix: "vault.azure.net", - ServiceBusEndpointSuffix: "servicebus.windows.net", - ServiceManagementVMDNSSuffix: "cloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.azure.com", - ContainerRegistryDNSSuffix: "azurecr.io", - CosmosDBDNSSuffix: "documents.azure.com", - TokenAudience: "https://management.azure.com/", - } - - // USGovernmentCloud is the cloud environment for the US Government - USGovernmentCloud = Environment{ - Name: "AzureUSGovernmentCloud", - ManagementPortalURL: "https://manage.windowsazure.us/", - PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", - ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", - GalleryEndpoint: "https://gallery.usgovcloudapi.net/", - KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.windows.net/", - ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", - BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", - StorageEndpointSuffix: "core.usgovcloudapi.net", - SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", - TrafficManagerDNSSuffix: "usgovtrafficmanager.net", - KeyVaultDNSSuffix: "vault.usgovcloudapi.net", - ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", - ServiceManagementVMDNSSuffix: 
"usgovcloudapp.net", - ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", - ContainerRegistryDNSSuffix: "azurecr.us", - CosmosDBDNSSuffix: "documents.azure.us", - TokenAudience: "https://management.usgovcloudapi.net/", - } - - // ChinaCloud is the cloud environment operated in China - ChinaCloud = Environment{ - Name: "AzureChinaCloud", - ManagementPortalURL: "https://manage.chinacloudapi.com/", - PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", - ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", - ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", - GalleryEndpoint: "https://gallery.chinacloudapi.cn/", - KeyVaultEndpoint: "https://vault.azure.cn/", - GraphEndpoint: "https://graph.chinacloudapi.cn/", - ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", - BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", - StorageEndpointSuffix: "core.chinacloudapi.cn", - SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", - TrafficManagerDNSSuffix: "trafficmanager.cn", - KeyVaultDNSSuffix: "vault.azure.cn", - ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", - ServiceManagementVMDNSSuffix: "chinacloudapp.cn", - ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", - ContainerRegistryDNSSuffix: "azurecr.cn", - CosmosDBDNSSuffix: "documents.azure.cn", - TokenAudience: "https://management.chinacloudapi.cn/", - } - - // GermanCloud is the cloud environment operated in Germany - GermanCloud = Environment{ - Name: "AzureGermanCloud", - ManagementPortalURL: "http://portal.microsoftazure.de/", - PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", - ServiceManagementEndpoint: "https://management.core.cloudapi.de/", - ResourceManagerEndpoint: "https://management.microsoftazure.de/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", - GalleryEndpoint: "https://gallery.cloudapi.de/", - KeyVaultEndpoint: "https://vault.microsoftazure.de/", - GraphEndpoint: "https://graph.cloudapi.de/", - ServiceBusEndpoint: "https://servicebus.cloudapi.de/", - BatchManagementEndpoint: "https://batch.cloudapi.de/", - StorageEndpointSuffix: "core.cloudapi.de", - SQLDatabaseDNSSuffix: "database.cloudapi.de", - TrafficManagerDNSSuffix: "azuretrafficmanager.de", - KeyVaultDNSSuffix: "vault.microsoftazure.de", - ServiceBusEndpointSuffix: "servicebus.cloudapi.de", - ServiceManagementVMDNSSuffix: "azurecloudapp.de", - ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", - // ContainerRegistryDNSSuffix: "", ACR not present yet in the German Cloud - CosmosDBDNSSuffix: "documents.microsoftazure.de", - TokenAudience: "https://management.microsoftazure.de/", - } -) - -// EnvironmentFromName returns an Environment based on the common name specified. -func EnvironmentFromName(name string) (Environment, error) { - // IMPORTANT - // As per @radhikagupta5: - // This is technical debt, fundamentally here because Kubernetes is not currently accepting - // contributions to the providers. Once that is an option, the provider should be updated to - // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation - // from this method based on the name that is provided to us. 
- if strings.EqualFold(name, "AZURESTACKCLOUD") { - return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) - } - - name = strings.ToUpper(name) - env, ok := environments[name] - if !ok { - return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) - } - - return env, nil -} - -// EnvironmentFromFile loads an Environment from a configuration file available on disk. -// This function is particularly useful in the Hybrid Cloud model, where one must define their own -// endpoints. -func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { - fileContents, err := ioutil.ReadFile(location) - if err != nil { - return - } - - err = json.Unmarshal(fileContents, &unmarshaled) - - return -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go deleted file mode 100644 index 507f9e9..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go +++ /dev/null @@ -1,245 +0,0 @@ -package azure - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/Azure/go-autorest/autorest" -) - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -type audience []string - -type authentication struct { - LoginEndpoint string `json:"loginEndpoint"` - Audiences audience `json:"audiences"` -} - -type environmentMetadataInfo struct { - GalleryEndpoint string `json:"galleryEndpoint"` - GraphEndpoint string `json:"graphEndpoint"` - PortalEndpoint string `json:"portalEndpoint"` - Authentication authentication `json:"authentication"` -} - -// EnvironmentProperty represent property names that clients can override -type EnvironmentProperty string - -const ( - // EnvironmentName ... - EnvironmentName EnvironmentProperty = "name" - // EnvironmentManagementPortalURL .. - EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" - // EnvironmentPublishSettingsURL ... - EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" - // EnvironmentServiceManagementEndpoint ... - EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" - // EnvironmentResourceManagerEndpoint ... - EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" - // EnvironmentActiveDirectoryEndpoint ... - EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" - // EnvironmentGalleryEndpoint ... - EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" - // EnvironmentKeyVaultEndpoint ... - EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" - // EnvironmentGraphEndpoint ... - EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" - // EnvironmentServiceBusEndpoint ... 
- EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" - // EnvironmentBatchManagementEndpoint ... - EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" - // EnvironmentStorageEndpointSuffix ... - EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" - // EnvironmentSQLDatabaseDNSSuffix ... - EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" - // EnvironmentTrafficManagerDNSSuffix ... - EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" - // EnvironmentKeyVaultDNSSuffix ... - EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" - // EnvironmentServiceBusEndpointSuffix ... - EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" - // EnvironmentServiceManagementVMDNSSuffix ... - EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" - // EnvironmentResourceManagerVMDNSSuffix ... - EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" - // EnvironmentContainerRegistryDNSSuffix ... - EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" - // EnvironmentTokenAudience ... - EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" -) - -// OverrideProperty represents property name and value that clients can override -type OverrideProperty struct { - Key EnvironmentProperty - Value string -} - -// EnvironmentFromURL loads an Environment from a URL -// This function is particularly useful in the Hybrid Cloud model, where one may define their own -// endpoints. -func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { - var metadataEnvProperties environmentMetadataInfo - - if resourceManagerEndpoint == "" { - return environment, fmt.Errorf("Metadata resource manager endpoint is empty") - } - - if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { - return environment, err - } - - // Give priority to user's override values - overrideProperties(&environment, properties) - - if environment.Name == "" { - environment.Name = "HybridEnvironment" - } - stampDNSSuffix := environment.StorageEndpointSuffix - if stampDNSSuffix == "" { - stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") - environment.StorageEndpointSuffix = stampDNSSuffix - } - if environment.KeyVaultDNSSuffix == "" { - environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) - } - if environment.KeyVaultEndpoint == "" { - environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) - } - if environment.TokenAudience == "" { - environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] - } - if environment.ActiveDirectoryEndpoint == "" { - environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint - } - if environment.ResourceManagerEndpoint == "" { - environment.ResourceManagerEndpoint = resourceManagerEndpoint - } - if environment.GalleryEndpoint == "" { - environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint - } - if environment.GraphEndpoint == "" { - environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint - } - - return environment, nil -} - -func overrideProperties(environment *Environment, properties 
[]OverrideProperty) { - for _, property := range properties { - switch property.Key { - case EnvironmentName: - { - environment.Name = property.Value - } - case EnvironmentManagementPortalURL: - { - environment.ManagementPortalURL = property.Value - } - case EnvironmentPublishSettingsURL: - { - environment.PublishSettingsURL = property.Value - } - case EnvironmentServiceManagementEndpoint: - { - environment.ServiceManagementEndpoint = property.Value - } - case EnvironmentResourceManagerEndpoint: - { - environment.ResourceManagerEndpoint = property.Value - } - case EnvironmentActiveDirectoryEndpoint: - { - environment.ActiveDirectoryEndpoint = property.Value - } - case EnvironmentGalleryEndpoint: - { - environment.GalleryEndpoint = property.Value - } - case EnvironmentKeyVaultEndpoint: - { - environment.KeyVaultEndpoint = property.Value - } - case EnvironmentGraphEndpoint: - { - environment.GraphEndpoint = property.Value - } - case EnvironmentServiceBusEndpoint: - { - environment.ServiceBusEndpoint = property.Value - } - case EnvironmentBatchManagementEndpoint: - { - environment.BatchManagementEndpoint = property.Value - } - case EnvironmentStorageEndpointSuffix: - { - environment.StorageEndpointSuffix = property.Value - } - case EnvironmentSQLDatabaseDNSSuffix: - { - environment.SQLDatabaseDNSSuffix = property.Value - } - case EnvironmentTrafficManagerDNSSuffix: - { - environment.TrafficManagerDNSSuffix = property.Value - } - case EnvironmentKeyVaultDNSSuffix: - { - environment.KeyVaultDNSSuffix = property.Value - } - case EnvironmentServiceBusEndpointSuffix: - { - environment.ServiceBusEndpointSuffix = property.Value - } - case EnvironmentServiceManagementVMDNSSuffix: - { - environment.ServiceManagementVMDNSSuffix = property.Value - } - case EnvironmentResourceManagerVMDNSSuffix: - { - environment.ResourceManagerVMDNSSuffix = property.Value - } - case EnvironmentContainerRegistryDNSSuffix: - { - environment.ContainerRegistryDNSSuffix = property.Value - } - case EnvironmentTokenAudience: - { - environment.TokenAudience = property.Value - } - } - } -} - -func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { - client := autorest.NewClientWithUserAgent("") - managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") - req, _ := http.NewRequest("GET", managementEndpoint, nil) - response, err := client.Do(req) - if err != nil { - return environment, err - } - defer response.Body.Close() - jsonResponse, err := ioutil.ReadAll(response.Body) - if err != nil { - return environment, err - } - err = json.Unmarshal(jsonResponse, &environment) - return environment, err -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go deleted file mode 100644 index 86ce9f2..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package azure - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Azure/go-autorest/autorest" -) - -// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. -// It also handles request retries -func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { - return func(s autorest.Sender) autorest.Sender { - return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := autorest.NewRetriableRequest(r) - for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - - resp, err = autorest.SendWithSender(s, rr.Request(), - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return resp, err - } - - if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { - return resp, err - } - var re RequestError - err = autorest.Respond( - resp, - autorest.ByUnmarshallingJSON(&re), - ) - if err != nil { - return resp, err - } - err = re - - if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { - regErr := register(client, r, re) - if regErr != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err) - } - } - } - return resp, err - }) - } -} - -func getProvider(re RequestError) (string, error) { - if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { - return re.ServiceError.Details[0]["target"].(string), nil - } - return "", errors.New("provider was not found in the response") -} - -func register(client autorest.Client, originalReq *http.Request, re RequestError) error { - subID := getSubscription(originalReq.URL.Path) - if subID == "" { - return errors.New("missing parameter subscriptionID to register resource provider") - } - providerName, err := getProvider(re) - if err != nil { - return fmt.Errorf("missing parameter provider to register resource provider: %s", err) - } - newURL := url.URL{ - Scheme: originalReq.URL.Scheme, - Host: originalReq.URL.Host, - } - - // taken from the resources SDK - // with almost identical code, this sections are easier to mantain - // It is also not a good idea to import the SDK here - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 - pathParameters := map[string]interface{}{ - "resourceProviderNamespace": autorest.Encode("path", providerName), - "subscriptionId": autorest.Encode("path", subID), - } - - const APIVersion = "2016-09-01" - queryParameters := map[string]interface{}{ - "api-version": APIVersion, - } - - preparer := autorest.CreatePreparer( - autorest.AsPost(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - - req, err := preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - type Provider struct { - RegistrationState 
*string `json:"registrationState,omitempty"` - } - var provider Provider - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - // poll for registered provisioning state - registrationStartTime := time.Now() - for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { - // taken from the resources SDK - // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 - preparer := autorest.CreatePreparer( - autorest.AsGet(), - autorest.WithBaseURL(newURL.String()), - autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), - autorest.WithQueryParameters(queryParameters), - ) - req, err = preparer.Prepare(&http.Request{}) - if err != nil { - return err - } - req = req.WithContext(originalReq.Context()) - - resp, err := autorest.SendWithSender(client, req, - autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), - ) - if err != nil { - return err - } - - err = autorest.Respond( - resp, - WithErrorUnlessStatusCode(http.StatusOK), - autorest.ByUnmarshallingJSON(&provider), - autorest.ByClosing(), - ) - if err != nil { - return err - } - - if provider.RegistrationState != nil && - *provider.RegistrationState == "Registered" { - break - } - - delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) - if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { - return originalReq.Context().Err() - } - } - if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { - return errors.New("polling for resource provider registration has exceeded the polling duration") - } - return err -} - -func getSubscription(path string) string { - parts := strings.Split(path, "/") - for i, v := range parts { - if v == "subscriptions" && (i+1) < len(parts) { - return parts[i+1] - } - } - return "" -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/client.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/client.go deleted file mode 100644 index 3496415..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/client.go +++ /dev/null @@ -1,276 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "crypto/tls" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/http/cookiejar" - "strings" - "time" - - "github.com/Azure/go-autorest/logger" - "github.com/Azure/go-autorest/tracing" -) - -const ( - // DefaultPollingDelay is a reasonable delay between polling requests. 
- DefaultPollingDelay = 60 * time.Second - - // DefaultPollingDuration is a reasonable total polling duration. - DefaultPollingDuration = 15 * time.Minute - - // DefaultRetryAttempts is number of attempts for retry status codes (5xx). - DefaultRetryAttempts = 3 - - // DefaultRetryDuration is the duration to wait between retries. - DefaultRetryDuration = 30 * time.Second -) - -var ( - // StatusCodesForRetry are a defined group of status code for which the client will retry - StatusCodesForRetry = []int{ - http.StatusRequestTimeout, // 408 - http.StatusTooManyRequests, // 429 - http.StatusInternalServerError, // 500 - http.StatusBadGateway, // 502 - http.StatusServiceUnavailable, // 503 - http.StatusGatewayTimeout, // 504 - } -) - -const ( - requestFormat = `HTTP Request Begin =================================================== -%s -===================================================== HTTP Request End -` - responseFormat = `HTTP Response Begin =================================================== -%s -===================================================== HTTP Response End -` -) - -// Response serves as the base for all responses from generated clients. It provides access to the -// last http.Response. -type Response struct { - *http.Response `json:"-"` -} - -// LoggingInspector implements request and response inspectors that log the full request and -// response to a supplied log. -type LoggingInspector struct { - Logger *log.Logger -} - -// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) WithInspection() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - var body, b bytes.Buffer - - defer r.Body.Close() - - r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) - if err := r.Write(&b); err != nil { - return nil, fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(requestFormat, b.String()) - - r.Body = ioutil.NopCloser(&body) - return p.Prepare(r) - }) - } -} - -// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The -// body is restored after being emitted. -// -// Note: Since it reads the entire Body, this decorator should not be used where body streaming is -// important. It is best used to trace JSON or similar body values. -func (li LoggingInspector) ByInspecting() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - var body, b bytes.Buffer - defer resp.Body.Close() - resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) - if err := resp.Write(&b); err != nil { - return fmt.Errorf("Failed to write response: %v", err) - } - - li.Logger.Printf(responseFormat, b.String()) - - resp.Body = ioutil.NopCloser(&body) - return r.Respond(resp) - }) - } -} - -// Client is the base for autorest generated clients. It provides default, "do nothing" -// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the -// standard, undecorated http.Client as a default Sender. -// -// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and -// return responses that compose with Response. 
-// -// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom -// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit -// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence -// sending the request by providing a decorated Sender. -type Client struct { - Authorizer Authorizer - Sender Sender - RequestInspector PrepareDecorator - ResponseInspector RespondDecorator - - // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header - PollingDelay time.Duration - - // PollingDuration sets the maximum polling time after which an error is returned. - // Setting this to zero will use the provided context to control the duration. - PollingDuration time.Duration - - // RetryAttempts sets the default number of retry attempts for client. - RetryAttempts int - - // RetryDuration sets the delay duration for retries. - RetryDuration time.Duration - - // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent - // through the Do method. - UserAgent string - - Jar http.CookieJar - - // Set to true to skip attempted registration of resource providers (false by default). - SkipResourceProviderRegistration bool -} - -// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed -// string. -func NewClientWithUserAgent(ua string) Client { - c := Client{ - PollingDelay: DefaultPollingDelay, - PollingDuration: DefaultPollingDuration, - RetryAttempts: DefaultRetryAttempts, - RetryDuration: DefaultRetryDuration, - UserAgent: UserAgent(), - } - c.Sender = c.sender() - c.AddToUserAgent(ua) - return c -} - -// AddToUserAgent adds an extension to the current user agent -func (c *Client) AddToUserAgent(extension string) error { - if extension != "" { - c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) - return nil - } - return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) -} - -// Do implements the Sender interface by invoking the active Sender after applying authorization. -// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent -// is set, apply set the User-Agent header. -func (c Client) Do(r *http.Request) (*http.Response, error) { - if r.UserAgent() == "" { - r, _ = Prepare(r, - WithUserAgent(c.UserAgent)) - } - // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations - r, err := Prepare(r, - c.WithAuthorization(), - c.WithInspection()) - if err != nil { - var resp *http.Response - if detErr, ok := err.(DetailedError); ok { - // if the authorization failed (e.g. invalid credentials) there will - // be a response associated with the error, be sure to return it. - resp = detErr.Response - } - return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") - } - logger.Instance.WriteRequest(r, logger.Filter{ - Header: func(k string, v []string) (bool, []string) { - // remove the auth token from the log - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { - v = []string{"**REDACTED**"} - } - return true, v - }, - }) - resp, err := SendWithSender(c.sender(), r) - logger.Instance.WriteResponse(resp, logger.Filter{}) - Respond(resp, c.ByInspecting()) - return resp, err -} - -// sender returns the Sender to which to send requests. 
-func (c Client) sender() Sender { - if c.Sender == nil { - j, _ := cookiejar.New(nil) - tracing.Transport.Base = &http.Transport{ - TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, - }, - } - client := &http.Client{Jar: j, Transport: tracing.Transport} - return client - } - - return c.Sender -} - -// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator -// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. -func (c Client) WithAuthorization() PrepareDecorator { - return c.authorizer().WithAuthorization() -} - -// authorizer returns the Authorizer to use. -func (c Client) authorizer() Authorizer { - if c.Authorizer == nil { - return NullAuthorizer{} - } - return c.Authorizer -} - -// WithInspection is a convenience method that passes the request to the supplied RequestInspector, -// if present, or returns the WithNothing PrepareDecorator otherwise. -func (c Client) WithInspection() PrepareDecorator { - if c.RequestInspector == nil { - return WithNothing() - } - return c.RequestInspector -} - -// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, -// if present, or returns the ByIgnoring RespondDecorator otherwise. -func (c Client) ByInspecting() RespondDecorator { - if c.ResponseInspector == nil { - return ByIgnoring() - } - return c.ResponseInspector -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/date.go deleted file mode 100644 index c457106..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/date.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) -defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of -time.Time types. And both convert to time.Time through a ToTime method. -*/ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "time" -) - -const ( - fullDate = "2006-01-02" - fullDateJSON = `"2006-01-02"` - dateFormat = "%04d-%02d-%02d" - jsonFormat = `"%04d-%02d-%02d"` -) - -// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., -// 2006-01-02). -type Date struct { - time.Time -} - -// ParseDate create a new Date from the passed string. -func ParseDate(date string) (d Date, err error) { - return parseDate(date, fullDate) -} - -func parseDate(date string, format string) (Date, error) { - d, err := time.Parse(format, date) - return Date{Time: d}, err -} - -// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalBinary() ([]byte, error) { - return d.MarshalText() -} - -// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). 
-func (d *Date) UnmarshalBinary(data []byte) error { - return d.UnmarshalText(data) -} - -// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalJSON() (json []byte, err error) { - return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalJSON(data []byte) (err error) { - d.Time, err = time.Parse(fullDateJSON, string(data)) - return err -} - -// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d Date) MarshalText() (text []byte, err error) { - return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil -} - -// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., -// 2006-01-02). -func (d *Date) UnmarshalText(data []byte) (err error) { - d.Time, err = time.Parse(fullDate, string(data)) - return err -} - -// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). -func (d Date) String() string { - return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) -} - -// ToTime returns a Date as a time.Time -func (d Date) ToTime() time.Time { - return d.Time -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/time.go deleted file mode 100644 index b453fad..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/time.go +++ /dev/null @@ -1,103 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "regexp" - "time" -) - -// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. -const ( - azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` - azureUtcFormat = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` - rfc3339 = time.RFC3339Nano - tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` -) - -// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -type Time struct { - time.Time -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalBinary() ([]byte, error) { - return t.Time.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). 
-func (t Time) MarshalJSON() (json []byte, err error) { - return t.Time.MarshalJSON() -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalJSON(data []byte) (err error) { - timeFormat := azureUtcFormatJSON - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339JSON - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) MarshalText() (text []byte, err error) { - return t.Time.MarshalText() -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time -// (i.e., 2006-01-02T15:04:05Z). -func (t *Time) UnmarshalText(data []byte) (err error) { - timeFormat := azureUtcFormat - match, err := regexp.Match(tzOffsetRegex, data) - if err != nil { - return err - } else if match { - timeFormat = rfc3339 - } - t.Time, err = ParseTime(timeFormat, string(data)) - return err -} - -// String returns the Time formatted as an RFC3339 date-time string (i.e., -// 2006-01-02T15:04:05Z). -func (t Time) String() string { - // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} - -// ToTime returns a Time as a time.Time -func (t Time) ToTime() time.Time { - return t.Time -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go deleted file mode 100644 index 48fb39b..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go +++ /dev/null @@ -1,100 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "errors" - "time" -) - -const ( - rfc1123JSON = `"` + time.RFC1123 + `"` - rfc1123 = time.RFC1123 -) - -// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -type TimeRFC1123 struct { - time.Time -} - -// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123JSON, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). 
-func (t TimeRFC1123) MarshalJSON() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") - } - b := []byte(t.Format(rfc1123JSON)) - return b, nil -} - -// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalText() ([]byte, error) { - if y := t.Year(); y < 0 || y >= 10000 { - return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") - } - - b := []byte(t.Format(rfc1123)) - return b, nil -} - -// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { - t.Time, err = ParseTime(rfc1123, string(data)) - if err != nil { - return err - } - return nil -} - -// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) MarshalBinary() ([]byte, error) { - return t.MarshalText() -} - -// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time -// (i.e., Mon, 02 Jan 2006 15:04:05 MST). -func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { - return t.UnmarshalText(data) -} - -// ToTime returns a Time as a time.Time -func (t TimeRFC1123) ToTime() time.Time { - return t.Time -} - -// String returns the Time formatted as an RFC1123 date-time string (i.e., -// Mon, 02 Jan 2006 15:04:05 MST). -func (t TimeRFC1123) String() string { - // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. - b, err := t.MarshalText() - if err != nil { - return "" - } - return string(b) -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go deleted file mode 100644 index 7073959..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go +++ /dev/null @@ -1,123 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "time" -) - -// unixEpoch is the moment in time that should be treated as timestamp 0. -var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) - -// UnixTime marshals and unmarshals a time that is represented as the number -// of seconds (ignoring skip-seconds) since the Unix Epoch. -type UnixTime time.Time - -// Duration returns the time as a Duration since the UnixEpoch. -func (t UnixTime) Duration() time.Duration { - return time.Time(t).Sub(unixEpoch) -} - -// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. 
-func NewUnixTimeFromSeconds(seconds float64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) -} - -// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. -func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { - return NewUnixTimeFromDuration(time.Duration(nanoseconds)) -} - -// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. -func NewUnixTimeFromDuration(dur time.Duration) UnixTime { - return UnixTime(unixEpoch.Add(dur)) -} - -// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' -func UnixEpoch() time.Time { - return unixEpoch -} - -// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. -// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) -func (t UnixTime) MarshalJSON() ([]byte, error) { - buffer := &bytes.Buffer{} - enc := json.NewEncoder(buffer) - err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) - if err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since -// midnight January 1st, 1970. -func (t *UnixTime) UnmarshalJSON(text []byte) error { - dec := json.NewDecoder(bytes.NewReader(text)) - - var secondsSinceEpoch float64 - if err := dec.Decode(&secondsSinceEpoch); err != nil { - return err - } - - *t = NewUnixTimeFromSeconds(secondsSinceEpoch) - - return nil -} - -// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. -func (t UnixTime) MarshalText() ([]byte, error) { - cast := time.Time(t) - return cast.MarshalText() -} - -// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. -func (t *UnixTime) UnmarshalText(raw []byte) error { - var unmarshaled time.Time - - if err := unmarshaled.UnmarshalText(raw); err != nil { - return err - } - - *t = UnixTime(unmarshaled) - return nil -} - -// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. -func (t UnixTime) MarshalBinary() ([]byte, error) { - buf := &bytes.Buffer{} - - payload := int64(t.Duration()) - - if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. -func (t *UnixTime) UnmarshalBinary(raw []byte) error { - var nanosecondsSinceEpoch int64 - - if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { - return err - } - *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/utility.go deleted file mode 100644 index 12addf0..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/date/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package date - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "strings" - "time" -) - -// ParseTime to parse Time string to specified format. -func ParseTime(format string, t string) (d time.Time, err error) { - return time.Parse(format, strings.ToUpper(t)) -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/error.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/error.go deleted file mode 100644 index f724f33..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/error.go +++ /dev/null @@ -1,98 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "net/http" -) - -const ( - // UndefinedStatusCode is used when HTTP status code is not available for an error. - UndefinedStatusCode = 0 -) - -// DetailedError encloses a error with details of the package, method, and associated HTTP -// status code (if any). -type DetailedError struct { - Original error - - // PackageType is the package type of the object emitting the error. For types, the value - // matches that produced the the '%T' format specifier of the fmt package. For other elements, - // such as functions, it is just the package name (e.g., "autorest"). - PackageType string - - // Method is the name of the method raising the error. - Method string - - // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. - StatusCode interface{} - - // Message is the error message. - Message string - - // Service Error is the response body of failed API in bytes - ServiceError []byte - - // Response is the response object that was returned during failure if applicable. - Response *http.Response -} - -// NewError creates a new Error conforming object from the passed packageType, method, and -// message. message is treated as a format string to which the optional args apply. -func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, nil, message, args...) -} - -// NewErrorWithResponse creates a new Error conforming object from the passed -// packageType, method, statusCode of the given resp (UndefinedStatusCode if -// resp is nil), and message. message is treated as a format string to which the -// optional args apply. -func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - return NewErrorWithError(nil, packageType, method, resp, message, args...) 
-} - -// NewErrorWithError creates a new Error conforming object from the -// passed packageType, method, statusCode of the given resp (UndefinedStatusCode -// if resp is nil), message, and original error. message is treated as a format -// string to which the optional args apply. -func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { - if v, ok := original.(DetailedError); ok { - return v - } - - statusCode := UndefinedStatusCode - if resp != nil { - statusCode = resp.StatusCode - } - - return DetailedError{ - Original: original, - PackageType: packageType, - Method: method, - StatusCode: statusCode, - Message: fmt.Sprintf(message, args...), - Response: resp, - } -} - -// Error returns a formatted containing all available details (i.e., PackageType, Method, -// StatusCode, Message, and original error (if any)). -func (e DetailedError) Error() string { - if e.Original == nil { - return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) - } - return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/preparer.go deleted file mode 100644 index 6d67bd7..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ /dev/null @@ -1,480 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/url" - "strings" -) - -const ( - mimeTypeJSON = "application/json" - mimeTypeOctetStream = "application/octet-stream" - mimeTypeFormPost = "application/x-www-form-urlencoded" - - headerAuthorization = "Authorization" - headerContentType = "Content-Type" - headerUserAgent = "User-Agent" -) - -// Preparer is the interface that wraps the Prepare method. -// -// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations -// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. -type Preparer interface { - Prepare(*http.Request) (*http.Request, error) -} - -// PreparerFunc is a method that implements the Preparer interface. -type PreparerFunc func(*http.Request) (*http.Request, error) - -// Prepare implements the Preparer interface on PreparerFunc. -func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { - return pf(r) -} - -// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then affect the result. -type PrepareDecorator func(Preparer) Preparer - -// CreatePreparer creates, decorates, and returns a Preparer. 
-// Without decorators, the returned Preparer returns the passed http.Request unmodified. -// Preparers are safe to share and re-use. -func CreatePreparer(decorators ...PrepareDecorator) Preparer { - return DecoratePreparer( - Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), - decorators...) -} - -// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it -// applies to the Preparer. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (change the http.Request and then pass it -// along) or a post-decorator (pass the http.Request along and alter it on return). -func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { - for _, decorate := range decorators { - p = decorate(p) - } - return p -} - -// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. -// It creates a Preparer from the decorators which it then applies to the passed http.Request. -func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { - if r == nil { - return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") - } - return CreatePreparer(decorators...).Prepare(r) -} - -// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed -// http.Request. -func WithNothing() PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - return p.Prepare(r) - }) - } -} - -// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to -// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before -// adding the header. -func WithHeader(header string, value string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(header), value) - } - return r, err - }) - } -} - -// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to -// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before -// adding them. -func WithHeaders(headers map[string]interface{}) PrepareDecorator { - h := ensureValueStrings(headers) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.Header == nil { - r.Header = make(http.Header) - } - - for name, value := range h { - r.Header.Set(http.CanonicalHeaderKey(name), value) - } - } - return r, err - }) - } -} - -// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose -// value is "Bearer " followed by the supplied token. -func WithBearerAuthorization(token string) PrepareDecorator { - return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) -} - -// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value -// is the passed contentType. -func AsContentType(contentType string) PrepareDecorator { - return WithHeader(headerContentType, contentType) -} - -// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the -// passed string. 
-func WithUserAgent(ua string) PrepareDecorator { - return WithHeader(headerUserAgent, ua) -} - -// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/x-www-form-urlencoded". -func AsFormURLEncoded() PrepareDecorator { - return AsContentType(mimeTypeFormPost) -} - -// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is -// "application/json". -func AsJSON() PrepareDecorator { - return AsContentType(mimeTypeJSON) -} - -// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. -func AsOctetStream() PrepareDecorator { - return AsContentType(mimeTypeOctetStream) -} - -// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The -// decorator does not validate that the passed method string is a known HTTP method. -func WithMethod(method string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r.Method = method - return p.Prepare(r) - }) - } -} - -// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. -func AsDelete() PrepareDecorator { return WithMethod("DELETE") } - -// AsGet returns a PrepareDecorator that sets the HTTP method to GET. -func AsGet() PrepareDecorator { return WithMethod("GET") } - -// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. -func AsHead() PrepareDecorator { return WithMethod("HEAD") } - -// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. -func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } - -// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. -func AsPatch() PrepareDecorator { return WithMethod("PATCH") } - -// AsPost returns a PrepareDecorator that sets the HTTP method to POST. -func AsPost() PrepareDecorator { return WithMethod("POST") } - -// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. -func AsPut() PrepareDecorator { return WithMethod("PUT") } - -// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed -// from the supplied baseUrl. -func WithBaseURL(baseURL string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var u *url.URL - if u, err = url.Parse(baseURL); err != nil { - return r, err - } - if u.Scheme == "" { - err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) - } - if err == nil { - r.URL = u - } - } - return r, err - }) - } -} - -// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the -// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. -func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(urlParameters) - for key, value := range parameters { - baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) - } - return WithBaseURL(baseURL) -} - -// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the -// http.Request body. 
-func WithFormData(v url.Values) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - s := v.Encode() - - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) - r.ContentLength = int64(len(s)) - r.Body = ioutil.NopCloser(strings.NewReader(s)) - } - return r, err - }) - } -} - -// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters -// into the http.Request body. -func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - var body bytes.Buffer - writer := multipart.NewWriter(&body) - for key, value := range formDataParameters { - if rc, ok := value.(io.ReadCloser); ok { - var fd io.Writer - if fd, err = writer.CreateFormFile(key, key); err != nil { - return r, err - } - if _, err = io.Copy(fd, rc); err != nil { - return r, err - } - } else { - if err = writer.WriteField(key, ensureValueString(value)); err != nil { - return r, err - } - } - } - if err = writer.Close(); err != nil { - return r, err - } - if r.Header == nil { - r.Header = make(http.Header) - } - r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) - r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) - r.ContentLength = int64(body.Len()) - return r, err - } - return r, err - }) - } -} - -// WithFile returns a PrepareDecorator that sends file in request body. -func WithFile(f io.ReadCloser) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := ioutil.ReadAll(f) - if err != nil { - return r, err - } - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - r.ContentLength = int64(len(b)) - } - return r, err - }) - } -} - -// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request -// and sets the Content-Length header. -func WithBool(v bool) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the -// request and sets the Content-Length header. -func WithFloat32(v float32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the -// request and sets the Content-Length header. -func WithFloat64(v float64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request -// and sets the Content-Length header. -func WithInt32(v int32) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request -// and sets the Content-Length header. -func WithInt64(v int64) PrepareDecorator { - return WithString(fmt.Sprintf("%v", v)) -} - -// WithString returns a PrepareDecorator that encodes the passed string into the body of the request -// and sets the Content-Length header. 
-func WithString(v string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - r.ContentLength = int64(len(v)) - r.Body = ioutil.NopCloser(strings.NewReader(v)) - } - return r, err - }) - } -} - -// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the -// request and sets the Content-Length header. -func WithJSON(v interface{}) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - b, err := json.Marshal(v) - if err == nil { - r.ContentLength = int64(len(b)) - r.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - } - return r, err - }) - } -} - -// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path -// is absolute (that is, it begins with a "/"), it replaces the existing path. -func WithPath(path string) PrepareDecorator { - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPath", "Invoked with a nil URL") - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The -// values will be escaped (aka URL encoded) before insertion into the path. -func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := escapeValueStrings(ensureValueStrings(pathParameters)) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the -// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. -func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(pathParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") - } - for key, value := range parameters { - path = strings.Replace(path, "{"+key+"}", value, -1) - } - - if r.URL, err = parseURL(r.URL, path); err != nil { - return r, err - } - } - return r, err - }) - } -} - -func parseURL(u *url.URL, path string) (*url.URL, error) { - p := strings.TrimRight(u.String(), "/") - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - return url.Parse(p + path) -} - -// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters -// given in the supplied map (i.e., key=value). 
-func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { - parameters := ensureValueStrings(queryParameters) - return func(p Preparer) Preparer { - return PreparerFunc(func(r *http.Request) (*http.Request, error) { - r, err := p.Prepare(r) - if err == nil { - if r.URL == nil { - return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") - } - - v := r.URL.Query() - for key, value := range parameters { - d, err := url.QueryUnescape(value) - if err != nil { - return r, err - } - v.Add(key, d) - } - r.URL.RawQuery = v.Encode() - } - return r, err - }) - } -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/responder.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/responder.go deleted file mode 100644 index a908a0a..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/responder.go +++ /dev/null @@ -1,250 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Responder is the interface that wraps the Respond method. -// -// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold -// state since Responders may be shared and re-used. -type Responder interface { - Respond(*http.Response) error -} - -// ResponderFunc is a method that implements the Responder interface. -type ResponderFunc func(*http.Response) error - -// Respond implements the Responder interface on ResponderFunc. -func (rf ResponderFunc) Respond(r *http.Response) error { - return rf(r) -} - -// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to -// the http.Response and pass it along or, first, pass the http.Response along then react. -type RespondDecorator func(Responder) Responder - -// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned -// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share -// and re-used: It depends on the applied decorators. For example, a standard decorator that closes -// the response body is fine to share whereas a decorator that reads the body into a passed struct -// is not. -// -// To prevent memory leaks, ensure that at least one Responder closes the response body. -func CreateResponder(decorators ...RespondDecorator) Responder { - return DecorateResponder( - Responder(ResponderFunc(func(r *http.Response) error { return nil })), - decorators...) -} - -// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it -// applies to the Responder. Decorators are applied in the order received, but their affect upon the -// request depends on whether they are a pre-decorator (react to the http.Response and then pass it -// along) or a post-decorator (pass the http.Response along and then react). 
-func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { - for _, decorate := range decorators { - r = decorate(r) - } - return r -} - -// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. -// It creates a Responder from the decorators it then applies to the passed http.Response. -func Respond(r *http.Response, decorators ...RespondDecorator) error { - if r == nil { - return nil - } - return CreateResponder(decorators...).Respond(r) -} - -// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined -// to the next RespondDecorator. -func ByIgnoring() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - return r.Respond(resp) - }) - } -} - -// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as -// the Body is read. -func ByCopying(b *bytes.Buffer) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - resp.Body = TeeReadCloser(resp.Body, b) - } - return err - }) - } -} - -// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which -// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed -// Responder is invoked prior to discarding the response body, the decorator may occur anywhere -// within the set. -func ByDiscardingBody() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && resp != nil && resp.Body != nil { - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return fmt.Errorf("Error discarding the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it -// closes the response body. Since the passed Responder is invoked prior to closing the response -// body, the decorator may occur anywhere within the set. -func ByClosing() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which -// it closes the response if the passed Responder returns an error and the response body exists. -func ByClosingIfError() RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err != nil && resp != nil && resp.Body != nil { - if err := resp.Body.Close(); err != nil { - return fmt.Errorf("Error closing the response body: %v", err) - } - } - return err - }) - } -} - -// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the -// response Body into the value pointed to by v. 
-func ByUnmarshallingJSON(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - // Some responses might include a BOM, remove for successful unmarshalling - b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else if len(strings.Trim(string(b), " ")) > 0 { - errInner = json.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the -// response Body into the value pointed to by v. -func ByUnmarshallingXML(v interface{}) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil { - b, errInner := ioutil.ReadAll(resp.Body) - if errInner != nil { - err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) - } else { - errInner = xml.Unmarshal(b, v) - if errInner != nil { - err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) - } - } - } - return err - }) - } -} - -// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response -// StatusCode is among the set passed. On error, response body is fully read into a buffer and -// presented in the returned error, as well as in the response body. -func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { - return func(r Responder) Responder { - return ResponderFunc(func(resp *http.Response) error { - err := r.Respond(resp) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - if resp.Body != nil { - defer resp.Body.Close() - b, _ := ioutil.ReadAll(resp.Body) - derr.ServiceError = b - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - err = derr - } - return err - }) - } -} - -// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is -// anything other than HTTP 200. -func WithErrorUnlessOK() RespondDecorator { - return WithErrorUnlessStatusCode(http.StatusOK) -} - -// ExtractHeader extracts all values of the specified header from the http.Response. It returns an -// empty string slice if the passed http.Response is nil or the header does not exist. -func ExtractHeader(header string, resp *http.Response) []string { - if resp != nil && resp.Header != nil { - return resp.Header[http.CanonicalHeaderKey(header)] - } - return nil -} - -// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It -// returns an empty string if the passed http.Response is nil or the header does not exist. 
-func ExtractHeaderValue(header string, resp *http.Response) string { - h := ExtractHeader(header, resp) - if len(h) > 0 { - return h[0] - } - return "" -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go deleted file mode 100644 index fa11dbe..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go +++ /dev/null @@ -1,52 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. -func NewRetriableRequest(req *http.Request) *RetriableRequest { - return &RetriableRequest{req: req} -} - -// Request returns the wrapped HTTP request. -func (rr *RetriableRequest) Request() *http.Request { - return rr.req -} - -func (rr *RetriableRequest) prepareFromByteReader() (err error) { - // fall back to making a copy (only do this once) - b := []byte{} - if rr.req.ContentLength > 0 { - b = make([]byte, rr.req.ContentLength) - _, err = io.ReadFull(rr.req.Body, b) - if err != nil { - return err - } - } else { - b, err = ioutil.ReadAll(rr.req.Body) - if err != nil { - return err - } - } - rr.br = bytes.NewReader(b) - rr.req.Body = ioutil.NopCloser(rr.br) - return err -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go deleted file mode 100644 index 7143cc6..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. 
-func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.br != nil { - _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.ContentLength = 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go deleted file mode 100644 index ae15c6b..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package autorest - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" -) - -// RetriableRequest provides facilities for retrying an HTTP request. -type RetriableRequest struct { - req *http.Request - rc io.ReadCloser - br *bytes.Reader -} - -// Prepare signals that the request is about to be sent. -func (rr *RetriableRequest) Prepare() (err error) { - // preserve the request body; this is to support retry logic as - // the underlying transport will always close the reqeust body - if rr.req.Body != nil { - if rr.rc != nil { - rr.req.Body = rr.rc - } else if rr.br != nil { - _, err = rr.br.Seek(0, io.SeekStart) - rr.req.Body = ioutil.NopCloser(rr.br) - } - if err != nil { - return err - } - if rr.req.GetBody != nil { - // this will allow us to preserve the body without having to - // make a copy. note we need to do this on each iteration - rr.rc, err = rr.req.GetBody() - if err != nil { - return err - } - } else if rr.br == nil { - // fall back to making a copy (only do this once) - err = rr.prepareFromByteReader() - } - } - return err -} - -func removeRequestBody(req *http.Request) { - req.Body = nil - req.GetBody = nil - req.ContentLength = 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/sender.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/sender.go deleted file mode 100644 index 6665d7c..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/sender.go +++ /dev/null @@ -1,326 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "log" - "math" - "net/http" - "strconv" - "time" - - "github.com/Azure/go-autorest/tracing" -) - -// Sender is the interface that wraps the Do method to send HTTP requests. -// -// The standard http.Client conforms to this interface. -type Sender interface { - Do(*http.Request) (*http.Response, error) -} - -// SenderFunc is a method that implements the Sender interface. -type SenderFunc func(*http.Request) (*http.Response, error) - -// Do implements the Sender interface on SenderFunc. -func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { - return sf(r) -} - -// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the -// http.Request and pass it along or, first, pass the http.Request along then react to the -// http.Response result. -type SendDecorator func(Sender) Sender - -// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. -func CreateSender(decorators ...SendDecorator) Sender { - return DecorateSender(&http.Client{}, decorators...) -} - -// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to -// the Sender. Decorators are applied in the order received, but their affect upon the request -// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a -// post-decorator (pass the http.Request along and react to the results in http.Response). -func DecorateSender(s Sender, decorators ...SendDecorator) Sender { - for _, decorate := range decorators { - s = decorate(s) - } - return s -} - -// Send sends, by means of the default http.Client, the passed http.Request, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// Send is a convenience method and not recommended for production. Advanced users should use -// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). -// -// Send will not poll or retry requests. -func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...) -} - -// SendWithSender sends the passed http.Request, through the provided Sender, returning the -// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which -// it will apply the http.Client before invoking the Do method. -// -// SendWithSender will not poll or retry requests. -func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { - return DecorateSender(s, decorators...).Do(r) -} - -// AfterDelay returns a SendDecorator that delays for the passed time.Duration before -// invoking the Sender. The delay may be terminated by closing the optional channel on the -// http.Request. If canceled, no further Senders are invoked. 
-func AfterDelay(d time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - if !DelayForBackoff(d, 0, r.Context().Done()) { - return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") - } - return s.Do(r) - }) - } -} - -// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. -func AsIs() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - return s.Do(r) - }) - } -} - -// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which -// it closes the response if the passed Sender returns an error and the response body exists. -func DoCloseIfError() SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err != nil { - Respond(resp, ByDiscardingBody(), ByClosing()) - } - return resp, err - }) - } -} - -// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is -// among the set passed. Since these are artificial errors, the response body may still require -// closing. -func DoErrorIfStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response -// StatusCode is among the set passed. Since these are artificial errors, the response body -// may still require closing. -func DoErrorUnlessStatusCode(codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - resp, err := s.Do(r) - if err == nil && !ResponseHasStatusCode(resp, codes...) { - err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", - resp.Request.Method, - resp.Request.URL, - resp.Status) - } - return resp, err - }) - } -} - -// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the -// passed status codes. It expects the http.Response to contain a Location header providing the -// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than -// the supplied duration. It will delay between requests for the duration specified in the -// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by -// closing the optional channel on the http.Request. -func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - resp, err = s.Do(r) - - if err == nil && ResponseHasStatusCode(resp, codes...) { - r, err = NewPollingRequestWithContext(r.Context(), resp) - - for err == nil && ResponseHasStatusCode(resp, codes...) 
{ - Respond(resp, - ByDiscardingBody(), - ByClosing()) - resp, err = SendWithSender(s, r, - AfterDelay(GetRetryAfter(resp, delay))) - } - } - - return resp, err - }) - } -} - -// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - for attempt := 0; attempt < attempts; attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified -// number of attempts, exponentially backing off between requests using the supplied backoff -// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on -// the http.Request. -func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - // Increment to add the first call (attempts denotes number of retries) - for attempt := 0; attempt < attempts+1; { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - // if the error isn't temporary don't bother retrying - if err != nil && !IsTemporaryNetworkError(err) { - return nil, err - } - // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication - // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. - if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { - return resp, err - } - delayed := DelayWithRetryAfter(resp, r.Context().Done()) - if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return resp, r.Context().Err() - } - // don't count a 429 against the number of attempts - // so that we continue to retry until it succeeds - if resp == nil || resp.StatusCode != http.StatusTooManyRequests { - attempt++ - } - } - return resp, err - }) - } -} - -// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in -// responses with status code 429 -func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { - if resp == nil { - return false - } - retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) - if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { - select { - case <-time.After(time.Duration(retryAfter) * time.Second): - return true - case <-cancel: - return false - } - } - return false -} - -// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal -// to or greater than the specified duration, exponentially backing off between requests using the -// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the -// optional channel on the http.Request. 
-func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { - rr := NewRetriableRequest(r) - end := time.Now().Add(d) - for attempt := 0; time.Now().Before(end); attempt++ { - err = rr.Prepare() - if err != nil { - return resp, err - } - resp, err = s.Do(rr.Request()) - if err == nil { - return resp, err - } - if !DelayForBackoff(backoff, attempt, r.Context().Done()) { - return nil, r.Context().Err() - } - } - return resp, err - }) - } -} - -// WithLogging returns a SendDecorator that implements simple before and after logging of the -// request. -func WithLogging(logger *log.Logger) SendDecorator { - return func(s Sender) Sender { - return SenderFunc(func(r *http.Request) (*http.Response, error) { - logger.Printf("Sending %s %s", r.Method, r.URL) - resp, err := s.Do(r) - if err != nil { - logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) - } else { - logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) - } - return resp, err - }) - } -} - -// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of -// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set -// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, -// returns false. -// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt -// count. -func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { - select { - case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): - return true - case <-cancel: - return false - } -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/utility.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/utility.go deleted file mode 100644 index 08cf11c..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/utility.go +++ /dev/null @@ -1,228 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "io" - "net" - "net/http" - "net/url" - "reflect" - "strings" - - "github.com/Azure/go-autorest/autorest/adal" -) - -// EncodedAs is a series of constants specifying various data encodings -type EncodedAs string - -const ( - // EncodedAsJSON states that data is encoded as JSON - EncodedAsJSON EncodedAs = "JSON" - - // EncodedAsXML states that data is encoded as Xml - EncodedAsXML EncodedAs = "XML" -) - -// Decoder defines the decoding method json.Decoder and xml.Decoder share -type Decoder interface { - Decode(v interface{}) error -} - -// NewDecoder creates a new decoder appropriate to the passed encoding. -// encodedAs specifies the type of encoding and r supplies the io.Reader containing the -// encoded data. 
-func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { - if encodedAs == EncodedAsJSON { - return json.NewDecoder(r) - } else if encodedAs == EncodedAsXML { - return xml.NewDecoder(r) - } - return nil -} - -// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy -// is especially useful if there is a chance the data will fail to decode. -// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v -// is the decoding destination. -func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { - b := bytes.Buffer{} - return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) -} - -// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. -// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. -// Further, when it is closed, it ensures that rc is closed as well. -func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { - return &teeReadCloser{rc, io.TeeReader(rc, w)} -} - -type teeReadCloser struct { - rc io.ReadCloser - r io.Reader -} - -func (t *teeReadCloser) Read(p []byte) (int, error) { - return t.r.Read(p) -} - -func (t *teeReadCloser) Close() error { - return t.rc.Close() -} - -func containsInt(ints []int, n int) bool { - for _, i := range ints { - if i == n { - return true - } - } - return false -} - -func escapeValueStrings(m map[string]string) map[string]string { - for key, value := range m { - m[key] = url.QueryEscape(value) - } - return m -} - -func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { - mapOfStrings := make(map[string]string) - for key, value := range mapOfInterface { - mapOfStrings[key] = ensureValueString(value) - } - return mapOfStrings -} - -func ensureValueString(value interface{}) string { - if value == nil { - return "" - } - switch v := value.(type) { - case string: - return v - case []byte: - return string(v) - default: - return fmt.Sprintf("%v", v) - } -} - -// MapToValues method converts map[string]interface{} to url.Values. -func MapToValues(m map[string]interface{}) url.Values { - v := url.Values{} - for key, value := range m { - x := reflect.ValueOf(value) - if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { - for i := 0; i < x.Len(); i++ { - v.Add(key, ensureValueString(x.Index(i))) - } - } else { - v.Add(key, ensureValueString(value)) - } - } - return v -} - -// AsStringSlice method converts interface{} to []string. This expects a -//that the parameter passed to be a slice or array of a type that has the underlying -//type a string. -func AsStringSlice(s interface{}) ([]string, error) { - v := reflect.ValueOf(s) - if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { - return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.") - } - stringSlice := make([]string, 0, v.Len()) - - for i := 0; i < v.Len(); i++ { - stringSlice = append(stringSlice, v.Index(i).String()) - } - return stringSlice, nil -} - -// String method converts interface v to string. If interface is a list, it -// joins list elements using the separator. Note that only sep[0] will be used for -// joining if any separator is specified. 
-func String(v interface{}, sep ...string) string { - if len(sep) == 0 { - return ensureValueString(v) - } - stringSlice, ok := v.([]string) - if ok == false { - var err error - stringSlice, err = AsStringSlice(v) - if err != nil { - panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) - } - } - return ensureValueString(strings.Join(stringSlice, sep[0])) -} - -// Encode method encodes url path and query parameters. -func Encode(location string, v interface{}, sep ...string) string { - s := String(v, sep...) - switch strings.ToLower(location) { - case "path": - return pathEscape(s) - case "query": - return queryEscape(s) - default: - return s - } -} - -func pathEscape(s string) string { - return strings.Replace(url.QueryEscape(s), "+", "%20", -1) -} - -func queryEscape(s string) string { - return url.QueryEscape(s) -} - -// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). -// This is mainly useful for long-running operations that use the Azure-AsyncOperation -// header, so we change the initial PUT into a GET to retrieve the final result. -func ChangeToGet(req *http.Request) *http.Request { - req.Method = "GET" - req.Body = nil - req.ContentLength = 0 - req.Header.Del("Content-Length") - return req -} - -// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError -// interface. If err is a DetailedError it will walk the chain of Original errors. -func IsTokenRefreshError(err error) bool { - if _, ok := err.(adal.TokenRefreshError); ok { - return true - } - if de, ok := err.(DetailedError); ok { - return IsTokenRefreshError(de.Original) - } - return false -} - -// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false -// if it's not. If the error doesn't implement the net.Error interface the return value is true. -func IsTemporaryNetworkError(err error) bool { - if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { - return true - } - return false -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/version.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/version.go deleted file mode 100644 index 9ecc348..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/autorest/version.go +++ /dev/null @@ -1,41 +0,0 @@ -package autorest - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "fmt" - "runtime" -) - -const number = "v11.7.0" - -var ( - userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", - runtime.Version(), - runtime.GOARCH, - runtime.GOOS, - number, - ) -) - -// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. -func UserAgent() string { - return userAgent -} - -// Version returns the semantic version (see http://semver.org). 
-func Version() string { - return number -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/logger/logger.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/logger/logger.go deleted file mode 100644 index da09f39..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/logger/logger.go +++ /dev/null @@ -1,328 +0,0 @@ -package logger - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strings" - "sync" - "time" -) - -// LevelType tells a logger the minimum level to log. When code reports a log entry, -// the LogLevel indicates the level of the log entry. The logger only records entries -// whose level is at least the level it was told to log. See the Log* constants. -// For example, if a logger is configured with LogError, then LogError, LogPanic, -// and LogFatal entries will be logged; lower level entries are ignored. -type LevelType uint32 - -const ( - // LogNone tells a logger not to log any entries passed to it. - LogNone LevelType = iota - - // LogFatal tells a logger to log all LogFatal entries passed to it. - LogFatal - - // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. - LogPanic - - // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. - LogError - - // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogWarning - - // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogInfo - - // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. - LogDebug -) - -const ( - logNone = "NONE" - logFatal = "FATAL" - logPanic = "PANIC" - logError = "ERROR" - logWarning = "WARNING" - logInfo = "INFO" - logDebug = "DEBUG" - logUnknown = "UNKNOWN" -) - -// ParseLevel converts the specified string into the corresponding LevelType. -func ParseLevel(s string) (lt LevelType, err error) { - switch strings.ToUpper(s) { - case logFatal: - lt = LogFatal - case logPanic: - lt = LogPanic - case logError: - lt = LogError - case logWarning: - lt = LogWarning - case logInfo: - lt = LogInfo - case logDebug: - lt = LogDebug - default: - err = fmt.Errorf("bad log level '%s'", s) - } - return -} - -// String implements the stringer interface for LevelType. -func (lt LevelType) String() string { - switch lt { - case LogNone: - return logNone - case LogFatal: - return logFatal - case LogPanic: - return logPanic - case LogError: - return logError - case LogWarning: - return logWarning - case LogInfo: - return logInfo - case LogDebug: - return logDebug - default: - return logUnknown - } -} - -// Filter defines functions for filtering HTTP request/response content. -type Filter struct { - // URL returns a potentially modified string representation of a request URL. 
- URL func(u *url.URL) string - - // Header returns a potentially modified set of values for the specified key. - // To completely exclude the header key/values return false. - Header func(key string, val []string) (bool, []string) - - // Body returns a potentially modified request/response body. - Body func(b []byte) []byte -} - -func (f Filter) processURL(u *url.URL) string { - if f.URL == nil { - return u.String() - } - return f.URL(u) -} - -func (f Filter) processHeader(k string, val []string) (bool, []string) { - if f.Header == nil { - return true, val - } - return f.Header(k, val) -} - -func (f Filter) processBody(b []byte) []byte { - if f.Body == nil { - return b - } - return f.Body(b) -} - -// Writer defines methods for writing to a logging facility. -type Writer interface { - // Writeln writes the specified message with the standard log entry header and new-line character. - Writeln(level LevelType, message string) - - // Writef writes the specified format specifier with the standard log entry header and no new-line character. - Writef(level LevelType, format string, a ...interface{}) - - // WriteRequest writes the specified HTTP request to the logger if the log level is greater than - // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no request content is excluded. - WriteRequest(req *http.Request, filter Filter) - - // WriteResponse writes the specified HTTP response to the logger if the log level is greater than - // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. - // Custom filters can be specified to exclude URL, header, and/or body content from the log. - // By default no response content is excluded. - WriteResponse(resp *http.Response, filter Filter) -} - -// Instance is the default log writer initialized during package init. -// This can be replaced with a custom implementation as required. -var Instance Writer - -// default log level -var logLevel = LogNone - -// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. -// If no value was specified the default value is LogNone. -// Custom loggers can call this to retrieve the configured log level. 
-func Level() LevelType { - return logLevel -} - -func init() { - // separated for testing purposes - initDefaultLogger() -} - -func initDefaultLogger() { - // init with nilLogger so callers don't have to do a nil check on Default - Instance = nilLogger{} - llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) - if llStr == "" { - return - } - var err error - logLevel, err = ParseLevel(llStr) - if err != nil { - fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) - return - } - if logLevel == LogNone { - return - } - // default to stderr - dest := os.Stderr - lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") - if strings.EqualFold(lfStr, "stdout") { - dest = os.Stdout - } else if lfStr != "" { - lf, err := os.Create(lfStr) - if err == nil { - dest = lf - } else { - fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) - } - } - Instance = fileLogger{ - logLevel: logLevel, - mu: &sync.Mutex{}, - logFile: dest, - } -} - -// the nil logger does nothing -type nilLogger struct{} - -func (nilLogger) Writeln(LevelType, string) {} - -func (nilLogger) Writef(LevelType, string, ...interface{}) {} - -func (nilLogger) WriteRequest(*http.Request, Filter) {} - -func (nilLogger) WriteResponse(*http.Response, Filter) {} - -// A File is used instead of a Logger so the stream can be flushed after every write. -type fileLogger struct { - logLevel LevelType - mu *sync.Mutex // for synchronizing writes to logFile - logFile *os.File -} - -func (fl fileLogger) Writeln(level LevelType, message string) { - fl.Writef(level, "%s\n", message) -} - -func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { - if fl.logLevel >= level { - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) - fl.logFile.Sync() - } -} - -func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { - if req == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) - // dump headers - for k, v := range req.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(req.Header, req.Body) { - // dump body - body, err := ioutil.ReadAll(req.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - if nc, ok := req.Body.(io.Seeker); ok { - // rewind to the beginning - nc.Seek(0, io.SeekStart) - } else { - // recreate the body - req.Body = ioutil.NopCloser(bytes.NewReader(body)) - } - } else { - fmt.Fprintf(b, "failed to read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { - if resp == nil || fl.logLevel < LogInfo { - return - } - b := &bytes.Buffer{} - fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) - // dump headers - for k, v := range resp.Header { - if ok, mv := filter.processHeader(k, v); ok { - fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) - } - } - if fl.shouldLogBody(resp.Header, resp.Body) { - // dump body - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err == nil { - fmt.Fprintln(b, string(filter.processBody(body))) - resp.Body = ioutil.NopCloser(bytes.NewReader(body)) - } else { - fmt.Fprintf(b, "failed to 
read body: %v\n", err) - } - } - fl.mu.Lock() - defer fl.mu.Unlock() - fmt.Fprint(fl.logFile, b.String()) - fl.logFile.Sync() -} - -// returns true if the provided body should be included in the log -func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { - ct := header.Get("Content-Type") - return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") -} - -// creates standard header for log entries, it contains a timestamp and the log level -func entryHeader(level LevelType) string { - // this format provides a fixed number of digits so the size of the timestamp is constant - return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) -} diff --git a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/tracing/tracing.go deleted file mode 100644 index cd61cb1..0000000 --- a/cmd/bpa-operator/vendor/github.com/Azure/go-autorest/tracing/tracing.go +++ /dev/null @@ -1,190 +0,0 @@ -package tracing - -// Copyright 2018 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "context" - "fmt" - "net/http" - "os" - - "contrib.go.opencensus.io/exporter/ocagent" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/plugin/ochttp/propagation/tracecontext" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" -) - -var ( - // Transport is the default tracing RoundTripper. The custom options setter will control - // if traces are being emitted or not. - Transport = &ochttp.Transport{ - Propagation: &tracecontext.HTTPFormat{}, - GetStartOptions: getStartOptions, - } - - // enabled is the flag for marking if tracing is enabled. - enabled = false - - // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise - // it will be using the parent sampler or the default. - sampler = trace.NeverSample() - - // Views for metric instrumentation. - views = map[string]*view.View{} - - // the trace exporter - traceExporter trace.Exporter -) - -func init() { - enableFromEnv() -} - -func enableFromEnv() { - _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED") - _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD") - if ok || legacyOk { - agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT") - - if ok { - EnableWithAIForwarding(agentEndpoint) - } else { - Enable() - } - } -} - -// IsEnabled returns true if monitoring is enabled for the sdk. -func IsEnabled() bool { - return enabled -} - -// Enable will start instrumentation for metrics and traces. -func Enable() error { - enabled = true - sampler = nil - - err := initStats() - return err -} - -// Disable will disable instrumentation for metrics and traces. 
-func Disable() { - disableStats() - sampler = trace.NeverSample() - if traceExporter != nil { - trace.UnregisterExporter(traceExporter) - } - enabled = false -} - -// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder -// exporter making the metrics and traces available in app insights. -func EnableWithAIForwarding(agentEndpoint string) (err error) { - err = Enable() - if err != nil { - return err - } - - traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint)) - if err != nil { - return err - } - trace.RegisterExporter(traceExporter) - return -} - -// getStartOptions is the custom options setter for the ochttp package. -func getStartOptions(*http.Request) trace.StartOptions { - return trace.StartOptions{ - Sampler: sampler, - } -} - -// initStats registers the views for the http metrics -func initStats() (err error) { - clientViews := []*view.View{ - ochttp.ClientCompletedCount, - ochttp.ClientRoundtripLatencyDistribution, - ochttp.ClientReceivedBytesDistribution, - ochttp.ClientSentBytesDistribution, - } - for _, cv := range clientViews { - vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name) - views[vn] = cv.WithName(vn) - err = view.Register(views[vn]) - if err != nil { - return err - } - } - return -} - -// disableStats will unregister the previously registered metrics -func disableStats() { - for _, v := range views { - view.Unregister(v) - } -} - -// StartSpan starts a trace span -func StartSpan(ctx context.Context, name string) context.Context { - ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler)) - return ctx -} - -// EndSpan ends a previously started span stored in the context -func EndSpan(ctx context.Context, httpStatusCode int, err error) { - span := trace.FromContext(ctx) - - if span == nil { - return - } - - if err != nil { - span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)}) - } - span.End() -} - -// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined -// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status -func toTraceStatusCode(httpStatusCode int) int32 { - switch { - case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest: - return trace.StatusCodeOK - case httpStatusCode == http.StatusBadRequest: - return trace.StatusCodeInvalidArgument - case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated. 
- return trace.StatusCodeUnauthenticated - case httpStatusCode == http.StatusForbidden: - return trace.StatusCodePermissionDenied - case httpStatusCode == http.StatusNotFound: - return trace.StatusCodeNotFound - case httpStatusCode == http.StatusTooManyRequests: - return trace.StatusCodeResourceExhausted - case httpStatusCode == 499: - return trace.StatusCodeCancelled - case httpStatusCode == http.StatusNotImplemented: - return trace.StatusCodeUnimplemented - case httpStatusCode == http.StatusServiceUnavailable: - return trace.StatusCodeUnavailable - case httpStatusCode == http.StatusGatewayTimeout: - return trace.StatusCodeDeadlineExceeded - default: - return trace.StatusCodeUnknown - } -} diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/.gitignore b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c8..0000000 --- a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/.travis.yml b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6a..0000000 --- a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/LICENSE b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 4b9986d..0000000 --- a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/README.md b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index 07de0c4..0000000 --- a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,188 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. - -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). -* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. - -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/purell.go b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index 6d0fc19..0000000 --- a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", err - } - - if f&FlagLowercaseHost == FlagLowercaseHost { - parsed.Host = strings.ToLower(parsed.Host) - } - - // The idna package doesn't fully conform to RFC 5895 - // (https://tools.ietf.org/html/rfc5895), so we do it here. - // Taken from Go 1.8 cycle source, courtesy of bradfitz. - // TODO: Remove when (if?) idna package conforms to RFC 5895. - parsed.Host = width.Fold.String(parsed.Host) - parsed.Host = norm.NFC.String(parsed.Host) - if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { - return "", err - } - - return NormalizeURL(parsed, f), nil -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." 
{ - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." { - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225..0000000 --- 
a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/LICENSE deleted file mode 100644 index 7448756..0000000 --- a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/README.md b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a..0000000 --- a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b84624..0000000 --- a/cmd/bpa-operator/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. 
-// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. -// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. -func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. 
-// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. -// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/.gitignore b/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/.gitignore deleted file mode 100644 index 0e9448e..0000000 --- a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.idea/ diff --git a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/.travis.yml b/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/.travis.yml deleted file mode 100644 index 92f2439..0000000 --- a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.x - - tip - -env: - - GO111MODULE=on - -script: - - go test -v diff --git a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/LICENSE b/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/LICENSE deleted file mode 100644 index 8f71f43..0000000 --- a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/README.md b/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/README.md deleted file mode 100644 index bbbf729..0000000 --- a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# jsonpatch - -[![Build Status](https://travis-ci.org/appscode/jsonpatch.svg?branch=master)](https://travis-ci.org/appscode/jsonpatch) -[![Go Report Card](https://goreportcard.com/badge/appscode/jsonpatch "Go Report Card")](https://goreportcard.com/report/appscode/jsonpatch) -[![GoDoc](https://godoc.org/github.com/appscode/jsonpatch?status.svg "GoDoc")](https://godoc.org/github.com/appscode/jsonpatch) - -As per http://jsonpatch.com JSON Patch is specified in RFC 6902 from the IETF. - -JSON Patch allows you to generate JSON that describes changes you want to make to a document, so you don't have to send the whole doc. 
JSON Patch format is supported by HTTP PATCH method, allowing for standards based partial updates via REST APIs. - -```console -go get github.com/appscode/jsonpatch -``` - -I tried some of the other "jsonpatch" go implementations, but none of them could diff two json documents and -generate format like jsonpatch.com specifies. Here's an example of the patch format: - -```json -[ - { "op": "replace", "path": "/baz", "value": "boo" }, - { "op": "add", "path": "/hello", "value": ["world"] }, - { "op": "remove", "path": "/foo"} -] - -``` -The API is super simple - -## example - -```go -package main - -import ( - "fmt" - "github.com/appscode/jsonpatch" -) - -var simpleA = `{"a":100, "b":200, "c":"hello"}` -var simpleB = `{"a":100, "b":200, "c":"goodbye"}` - -func main() { - patch, e := jsonpatch.CreatePatch([]byte(simpleA), []byte(simpleA)) - if e != nil { - fmt.Printf("Error creating JSON patch:%v", e) - return - } - for _, operation := range patch { - fmt.Printf("%s\n", operation.Json()) - } -} -``` - -This code needs more tests, as it's a highly recursive, type-fiddly monster. It's not a lot of code, but it has to deal with a lot of complexity. diff --git a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/go.mod b/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/go.mod deleted file mode 100644 index 458d129..0000000 --- a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/appscode/jsonpatch - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/evanphx/json-patch v4.0.0+incompatible - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 -) diff --git a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/go.sum b/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/go.sum deleted file mode 100644 index 0972e0e..0000000 --- a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/evanphx/json-patch v4.0.0+incompatible h1:xregGRMLBeuRcwiOTHRCsPPuzCQlqhxUPbqdw+zNkLc= -github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/jsonpatch.go b/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/jsonpatch.go deleted file mode 100644 index 3e69894..0000000 --- a/cmd/bpa-operator/vendor/github.com/appscode/jsonpatch/jsonpatch.go +++ /dev/null @@ -1,336 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strings" -) - -var errBadJSONDoc = fmt.Errorf("invalid JSON Document") - -type JsonPatchOperation = Operation - -type Operation struct { - Operation string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value,omitempty"` -} - -func (j *Operation) Json() string { - b, _ := json.Marshal(j) - return string(b) -} - -func (j *Operation) MarshalJSON() ([]byte, error) { - var b bytes.Buffer - b.WriteString("{") - b.WriteString(fmt.Sprintf(`"op":"%s"`, 
j.Operation)) - b.WriteString(fmt.Sprintf(`,"path":"%s"`, j.Path)) - // Consider omitting Value for non-nullable operations. - if j.Value != nil || j.Operation == "replace" || j.Operation == "add" { - v, err := json.Marshal(j.Value) - if err != nil { - return nil, err - } - b.WriteString(`,"value":`) - b.Write(v) - } - b.WriteString("}") - return b.Bytes(), nil -} - -type ByPath []Operation - -func (a ByPath) Len() int { return len(a) } -func (a ByPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByPath) Less(i, j int) bool { return a[i].Path < a[j].Path } - -func NewPatch(operation, path string, value interface{}) Operation { - return Operation{Operation: operation, Path: path, Value: value} -} - -// CreatePatch creates a patch as specified in http://jsonpatch.com/ -// -// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. -// The function will return an array of JsonPatchOperations -// -// An error will be returned if any of the two documents are invalid. -func CreatePatch(a, b []byte) ([]Operation, error) { - aI := map[string]interface{}{} - bI := map[string]interface{}{} - err := json.Unmarshal(a, &aI) - if err != nil { - return nil, errBadJSONDoc - } - err = json.Unmarshal(b, &bI) - if err != nil { - return nil, errBadJSONDoc - } - return diff(aI, bI, "", []Operation{}) -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt, ok := bv.(string) - if ok && bt == at { - return true - } - case float64: - bt, ok := bv.(float64) - if ok && bt == at { - return true - } - case bool: - bt, ok := bv.(bool) - if ok && bt == at { - return true - } - case map[string]interface{}: - bt, ok := bv.(map[string]interface{}) - if !ok { - return false - } - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } - } - for key := range bt { - if !matchesValue(at[key], bt[key]) { - return false - } - } - return true - case []interface{}: - bt, ok := bv.([]interface{}) - if !ok { - return false - } - if len(bt) != len(at) { - return false - } - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } - } - for key := range bt { - if !matchesValue(at[key], bt[key]) { - return false - } - } - return true - } - return false -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. -// TODO decode support: -// var rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") - -var rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") - -func makePath(path string, newPart interface{}) string { - key := rfc6901Encoder.Replace(fmt.Sprintf("%v", newPart)) - if path == "" { - return "/" + key - } - if strings.HasSuffix(path, "/") { - return path + key - } - return path + "/" + key -} - -// diff returns the (recursive) difference between a and b as an array of JsonPatchOperations. 
-func diff(a, b map[string]interface{}, path string, patch []Operation) ([]Operation, error) { - for key, bv := range b { - p := makePath(path, key) - av, ok := a[key] - // value was added - if !ok { - patch = append(patch, NewPatch("add", p, bv)) - continue - } - // Types are the same, compare values - var err error - patch, err = handleValues(av, bv, p, patch) - if err != nil { - return nil, err - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - p := makePath(path, key) - - patch = append(patch, NewPatch("remove", p, nil)) - } - } - return patch, nil -} - -func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, error) { - { - at := reflect.TypeOf(av) - bt := reflect.TypeOf(bv) - if at == nil && bt == nil { - // do nothing - return patch, nil - } else if at == nil && bt != nil { - return append(patch, NewPatch("add", p, bv)), nil - } else if at != bt { - // If types have changed, replace completely (preserves null in destination) - return append(patch, NewPatch("replace", p, bv)), nil - } - } - - var err error - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - patch, err = diff(at, bt, p, patch) - if err != nil { - return nil, err - } - case string, float64, bool: - if !matchesValue(av, bv) { - patch = append(patch, NewPatch("replace", p, bv)) - } - case []interface{}: - bt := bv.([]interface{}) - if isSimpleArray(at) && isSimpleArray(bt) { - patch = append(patch, compareEditDistance(at, bt, p)...) - } else { - n := min(len(at), len(bt)) - for i := len(at) - 1; i >= n; i-- { - patch = append(patch, NewPatch("remove", makePath(p, i), nil)) - } - for i := n; i < len(bt); i++ { - patch = append(patch, NewPatch("add", makePath(p, i), bt[i])) - } - for i := 0; i < n; i++ { - var err error - patch, err = handleValues(at[i], bt[i], makePath(p, i), patch) - if err != nil { - return nil, err - } - } - } - default: - panic(fmt.Sprintf("Unknown type:%T ", av)) - } - return patch, nil -} - -func isBasicType(a interface{}) bool { - switch a.(type) { - case string, float64, bool: - default: - return false - } - return true -} - -func isSimpleArray(a []interface{}) bool { - for i := range a { - switch a[i].(type) { - case string, float64, bool: - default: - val := reflect.ValueOf(a[i]) - if val.Kind() == reflect.Map { - for _, k := range val.MapKeys() { - av := val.MapIndex(k) - if av.Kind() == reflect.Ptr || av.Kind() == reflect.Interface { - if av.IsNil() { - continue - } - av = av.Elem() - } - if av.Kind() != reflect.String && av.Kind() != reflect.Float64 && av.Kind() != reflect.Bool { - return false - } - } - return true - } - return false - } - } - return true -} - -// https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm -// Adapted from https://github.com/texttheater/golang-levenshtein -func compareEditDistance(s, t []interface{}, p string) []Operation { - m := len(s) - n := len(t) - - d := make([][]int, m+1) - for i := 0; i <= m; i++ { - d[i] = make([]int, n+1) - d[i][0] = i - } - for j := 0; j <= n; j++ { - d[0][j] = j - } - - for j := 1; j <= n; j++ { - for i := 1; i <= m; i++ { - if reflect.DeepEqual(s[i-1], t[j-1]) { - d[i][j] = d[i-1][j-1] // no op required - } else { - del := d[i-1][j] + 1 - add := d[i][j-1] + 1 - rep := d[i-1][j-1] + 1 - d[i][j] = min(rep, min(add, del)) - } - } - } - - return backtrace(s, t, p, m, n, d) -} - -func min(x int, y int) int { - if y < x { - return y - } - return x -} - -func backtrace(s, t []interface{}, p string, i int, j 
int, matrix [][]int) []Operation { - if i > 0 && matrix[i-1][j]+1 == matrix[i][j] { - op := NewPatch("remove", makePath(p, i-1), nil) - return append([]Operation{op}, backtrace(s, t, p, i-1, j, matrix)...) - } - if j > 0 && matrix[i][j-1]+1 == matrix[i][j] { - op := NewPatch("add", makePath(p, i), t[j-1]) - return append([]Operation{op}, backtrace(s, t, p, i, j-1, matrix)...) - } - if i > 0 && j > 0 && matrix[i-1][j-1]+1 == matrix[i][j] { - if isBasicType(s[0]) { - op := NewPatch("replace", makePath(p, i-1), t[j-1]) - return append([]Operation{op}, backtrace(s, t, p, i-1, j-1, matrix)...) - } - - p2, _ := handleValues(s[j-1], t[j-1], makePath(p, i-1), []Operation{}) - return append(p2, backtrace(s, t, p, i-1, j-1, matrix)...) - } - if i > 0 && j > 0 && matrix[i-1][j-1] == matrix[i][j] { - return backtrace(s, t, p, i-1, j-1, matrix) - } - return []Operation{} -} diff --git a/cmd/bpa-operator/vendor/github.com/beorn7/perks/LICENSE b/cmd/bpa-operator/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177b..0000000 --- a/cmd/bpa-operator/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/cmd/bpa-operator/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/cmd/bpa-operator/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287..0000000 --- a/cmd/bpa-operator/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 
-3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 
-5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/cmd/bpa-operator/vendor/github.com/beorn7/perks/quantile/stream.go b/cmd/bpa-operator/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8..0000000 --- a/cmd/bpa-operator/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). 
-// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. 
-func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS deleted file mode 100644 index e068e73..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Google Inc. \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE deleted file mode 100644 index d645695..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go deleted file mode 100644 index 12b578d..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go +++ /dev/null @@ -1,356 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/common/v1/common.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type LibraryInfo_Language int32 - -const ( - LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 - LibraryInfo_CPP LibraryInfo_Language = 1 - LibraryInfo_C_SHARP LibraryInfo_Language = 2 - LibraryInfo_ERLANG LibraryInfo_Language = 3 - LibraryInfo_GO_LANG LibraryInfo_Language = 4 - LibraryInfo_JAVA LibraryInfo_Language = 5 - LibraryInfo_NODE_JS LibraryInfo_Language = 6 - LibraryInfo_PHP LibraryInfo_Language = 7 - LibraryInfo_PYTHON LibraryInfo_Language = 8 - LibraryInfo_RUBY LibraryInfo_Language = 9 -) - -var LibraryInfo_Language_name = map[int32]string{ - 0: "LANGUAGE_UNSPECIFIED", - 1: "CPP", - 2: "C_SHARP", - 3: "ERLANG", - 4: "GO_LANG", - 5: "JAVA", - 6: "NODE_JS", - 7: "PHP", - 8: "PYTHON", - 9: "RUBY", -} - -var LibraryInfo_Language_value = map[string]int32{ - "LANGUAGE_UNSPECIFIED": 0, - "CPP": 1, - "C_SHARP": 2, - "ERLANG": 3, - "GO_LANG": 4, - "JAVA": 5, - "NODE_JS": 6, - "PHP": 7, - "PYTHON": 8, - "RUBY": 9, -} - -func (x LibraryInfo_Language) String() string { - return proto.EnumName(LibraryInfo_Language_name, int32(x)) -} - -func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{2, 0} -} - -// Identifier metadata of the Node that produces the span or tracing data. -// Note, this is not the metadata about the Node or service that is described by associated spans. -// In the future we plan to extend the identifier proto definition to support -// additional information (e.g cloud id, etc.) -type Node struct { - // Identifier that uniquely identifies a process within a VM/container. - Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` - // Information on the OpenCensus Library that initiates the stream. - LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` - // Additional information on service. - ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` - // Additional attributes. 
- Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Node) Reset() { *m = Node{} } -func (m *Node) String() string { return proto.CompactTextString(m) } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{0} -} - -func (m *Node) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Node.Unmarshal(m, b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Node.Marshal(b, m, deterministic) -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return xxx_messageInfo_Node.Size(m) -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} - -var xxx_messageInfo_Node proto.InternalMessageInfo - -func (m *Node) GetIdentifier() *ProcessIdentifier { - if m != nil { - return m.Identifier - } - return nil -} - -func (m *Node) GetLibraryInfo() *LibraryInfo { - if m != nil { - return m.LibraryInfo - } - return nil -} - -func (m *Node) GetServiceInfo() *ServiceInfo { - if m != nil { - return m.ServiceInfo - } - return nil -} - -func (m *Node) GetAttributes() map[string]string { - if m != nil { - return m.Attributes - } - return nil -} - -// Identifier that uniquely identifies a process within a VM/container. -type ProcessIdentifier struct { - // The host name. Usually refers to the machine/container name. - // For example: os.Hostname() in Go, socket.gethostname() in Python. - HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` - // Process id. - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - // Start time of this ProcessIdentifier. Represented in epoch time. 
- StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } -func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } -func (*ProcessIdentifier) ProtoMessage() {} -func (*ProcessIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{1} -} - -func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) -} -func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) -} -func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessIdentifier.Merge(m, src) -} -func (m *ProcessIdentifier) XXX_Size() int { - return xxx_messageInfo_ProcessIdentifier.Size(m) -} -func (m *ProcessIdentifier) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) -} - -var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo - -func (m *ProcessIdentifier) GetHostName() string { - if m != nil { - return m.HostName - } - return "" -} - -func (m *ProcessIdentifier) GetPid() uint32 { - if m != nil { - return m.Pid - } - return 0 -} - -func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { - if m != nil { - return m.StartTimestamp - } - return nil -} - -// Information on OpenCensus Library. -type LibraryInfo struct { - // Language of OpenCensus Library. - Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` - // Version of Agent exporter of Library. - ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` - // Version of OpenCensus Library. 
- CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } -func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } -func (*LibraryInfo) ProtoMessage() {} -func (*LibraryInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{2} -} - -func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) -} -func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) -} -func (m *LibraryInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_LibraryInfo.Merge(m, src) -} -func (m *LibraryInfo) XXX_Size() int { - return xxx_messageInfo_LibraryInfo.Size(m) -} -func (m *LibraryInfo) XXX_DiscardUnknown() { - xxx_messageInfo_LibraryInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo - -func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { - if m != nil { - return m.Language - } - return LibraryInfo_LANGUAGE_UNSPECIFIED -} - -func (m *LibraryInfo) GetExporterVersion() string { - if m != nil { - return m.ExporterVersion - } - return "" -} - -func (m *LibraryInfo) GetCoreLibraryVersion() string { - if m != nil { - return m.CoreLibraryVersion - } - return "" -} - -// Additional service information. -type ServiceInfo struct { - // Name of the service. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } -func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } -func (*ServiceInfo) ProtoMessage() {} -func (*ServiceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_126c72ed8a252c84, []int{3} -} - -func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) -} -func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) -} -func (m *ServiceInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceInfo.Merge(m, src) -} -func (m *ServiceInfo) XXX_Size() int { - return xxx_messageInfo_ServiceInfo.Size(m) -} -func (m *ServiceInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo - -func (m *ServiceInfo) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func init() { - proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) - proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") - proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") - proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") - proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84) -} - 
-var fileDescriptor_126c72ed8a252c84 = []byte{ - // 590 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e, - 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee, - 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01, - 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde, - 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9, - 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2, - 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89, - 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9, - 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7, - 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39, - 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b, - 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b, - 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13, - 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06, - 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d, - 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67, - 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2, - 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a, - 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a, - 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76, - 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23, - 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c, - 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c, - 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92, - 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e, - 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51, - 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14, - 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83, - 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0, - 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86, - 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4, - 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd, - 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9, - 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c, - 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70, - 0x85, 0xec, 0xd8, 0x9f, 
0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1, - 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go deleted file mode 100644 index 801212d..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go +++ /dev/null @@ -1,264 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/metrics/v1/metrics_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" - v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type ExportMetricsServiceRequest struct { - // This is required only in the first message on the stream or if the - // previous sent ExportMetricsServiceRequest message has a different Node (e.g. - // when the same RPC is used to send Metrics from multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // A list of metrics that belong to the last received Node. - Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` - // The resource for the metrics in this message that do not have an explicit - // resource set. - // If unset, the most recently set resource in the RPC stream applies. It is - // valid to never be set within a stream, e.g. when no resource info is known - // at all or when all sent metrics have an explicit resource set. 
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } -func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceRequest) ProtoMessage() {} -func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_47e253a956287d04, []int{0} -} - -func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) -} -func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) -} -func (m *ExportMetricsServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) -} -func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo - -func (m *ExportMetricsServiceRequest) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric { - if m != nil { - return m.Metrics - } - return nil -} - -func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource { - if m != nil { - return m.Resource - } - return nil -} - -type ExportMetricsServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } -func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportMetricsServiceResponse) ProtoMessage() {} -func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_47e253a956287d04, []int{1} -} - -func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) -} -func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) -} -func (m *ExportMetricsServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) -} -func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest") - proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04) -} - -var fileDescriptor_47e253a956287d04 = []byte{ - // 340 bytes of a gzipped FileDescriptorProto - 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x4a, 0xf3, 0x40, - 0x14, 0x85, 0xff, 0xf9, 0x2b, 0x55, 0xa6, 0xe0, 0x62, 0xdc, 0x94, 0x2a, 0x52, 0xab, 0x48, 0x45, - 0x32, 0x63, 0xea, 0x42, 0x10, 0x54, 0x28, 0xb8, 0x11, 0x94, 0x12, 0x77, 0x6e, 0xa4, 0x4d, 0x2f, - 0x71, 0x16, 0x99, 0x1b, 0x67, 0x26, 0xc1, 0x57, 0x70, 0xe5, 0x3b, 0xf8, 0x5c, 0x3e, 0x8c, 0x24, - 0x93, 0xb4, 0x94, 0x18, 0x0b, 0xee, 0x2e, 0x99, 0xf3, 0x9d, 0x9c, 0x33, 0x73, 0xe9, 0x05, 0x26, - 0xa0, 0x42, 0x50, 0x26, 0x35, 0x22, 0xd1, 0x68, 0x51, 0x4c, 0x23, 0x50, 0x56, 0xc4, 0x60, 0xb5, - 0x0c, 0x8d, 0xc8, 0xfc, 0x6a, 0x7c, 0x36, 0xa0, 0x33, 0x19, 0x02, 0x2f, 0x64, 0xec, 0x60, 0x09, - 0xba, 0x2f, 0xbc, 0x00, 0x79, 0xa9, 0xe6, 0x99, 0xdf, 0xf3, 0x1a, 0xbc, 0x43, 0x8c, 0x63, 0x54, - 0xb9, 0xb5, 0x9b, 0x1c, 0xdf, 0x3b, 0xa9, 0xc9, 0xeb, 0x21, 0x4a, 0xe9, 0x69, 0x4d, 0xaa, 0xc1, - 0x60, 0xaa, 0x43, 0xc8, 0xb5, 0xd5, 0xec, 0xc4, 0x83, 0x2f, 0x42, 0x77, 0x6f, 0xdf, 0x12, 0xd4, - 0xf6, 0xde, 0x99, 0x3c, 0xba, 0x22, 0x01, 0xbc, 0xa6, 0x60, 0x2c, 0xbb, 0xa4, 0x1b, 0x0a, 0xe7, - 0xd0, 0x25, 0x7d, 0x32, 0xec, 0x8c, 0x8e, 0x79, 0x43, 0xb1, 0x32, 0x6b, 0xe6, 0xf3, 0x07, 0x9c, - 0x43, 0x50, 0x30, 0xec, 0x8a, 0x6e, 0x96, 0xc9, 0xba, 0xff, 0xfb, 0xad, 0x61, 0x67, 0x74, 0x58, - 0xc7, 0x97, 0x37, 0xc2, 0x5d, 0x80, 0xa0, 0x62, 0xd8, 0x98, 0x6e, 0x55, 0x61, 0xbb, 0xad, 0xa6, - 0xdf, 0x2f, 0xea, 0x64, 0x3e, 0x0f, 0xca, 0x39, 0x58, 0x70, 0x83, 0x7d, 0xba, 0xf7, 0x73, 0x3b, - 0x93, 0xa0, 0x32, 0x30, 0xfa, 0x24, 0x74, 0x7b, 0xf5, 0x88, 0x7d, 0x10, 0xda, 0x76, 0x0c, 0xbb, - 0xe6, 0x6b, 0xdf, 0x91, 0xff, 0x72, 0x79, 0xbd, 0x9b, 0x3f, 0xf3, 0x2e, 0xde, 0xe0, 0xdf, 0x90, - 0x9c, 0x91, 0xf1, 0x3b, 0xa1, 0x47, 0x12, 0xd7, 0x7b, 0x8d, 0x77, 0x56, 0x6d, 0x26, 0xb9, 0x6a, - 0x42, 0x9e, 0xee, 0x22, 0x69, 0x5f, 0xd2, 0x59, 0xfe, 0x48, 0xc2, 0x19, 0x78, 0x52, 0x19, 0xab, - 0xd3, 0x18, 0x94, 0x9d, 0x5a, 0x89, 0x4a, 0x2c, 0xbd, 0x3d, 0xb7, 0x32, 0x11, 0x28, 0x2f, 0xaa, - 0xef, 0xfb, 0xac, 0x5d, 0x1c, 0x9f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x61, 0x3b, 0xc3, - 0x1b, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MetricsServiceClient is the client API for MetricsService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MetricsServiceClient interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) -} - -type metricsServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { - return &metricsServiceClient{cc} -} - -func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { - stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) 
- if err != nil { - return nil, err - } - x := &metricsServiceExportClient{stream} - return x, nil -} - -type MetricsService_ExportClient interface { - Send(*ExportMetricsServiceRequest) error - Recv() (*ExportMetricsServiceResponse, error) - grpc.ClientStream -} - -type metricsServiceExportClient struct { - grpc.ClientStream -} - -func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { - m := new(ExportMetricsServiceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// MetricsServiceServer is the server API for MetricsService service. -type MetricsServiceServer interface { - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(MetricsService_ExportServer) error -} - -func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { - s.RegisterService(&_MetricsService_serviceDesc, srv) -} - -func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) -} - -type MetricsService_ExportServer interface { - Send(*ExportMetricsServiceResponse) error - Recv() (*ExportMetricsServiceRequest, error) - grpc.ServerStream -} - -type metricsServiceExportServer struct { - grpc.ServerStream -} - -func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { - m := new(ExportMetricsServiceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _MetricsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", - HandlerType: (*MetricsServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Export", - Handler: _MetricsService_Export_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", -} diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go deleted file mode 100644 index e7c49a3..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go +++ /dev/null @@ -1,443 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/agent/trace/v1/trace_service.proto - -package v1 - -import ( - context "context" - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" - v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type CurrentLibraryConfig struct { - // This is required only in the first message on the stream or if the - // previous sent CurrentLibraryConfig message has a different Node (e.g. - // when the same RPC is used to configure multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // Current configuration. - Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} } -func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) } -func (*CurrentLibraryConfig) ProtoMessage() {} -func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{0} -} - -func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b) -} -func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic) -} -func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_CurrentLibraryConfig.Merge(m, src) -} -func (m *CurrentLibraryConfig) XXX_Size() int { - return xxx_messageInfo_CurrentLibraryConfig.Size(m) -} -func (m *CurrentLibraryConfig) XXX_DiscardUnknown() { - xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo - -func (m *CurrentLibraryConfig) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig { - if m != nil { - return m.Config - } - return nil -} - -type UpdatedLibraryConfig struct { - // This field is ignored when the RPC is used to configure only one Application. - // This is required only in the first message on the stream or if the - // previous sent UpdatedLibraryConfig message has a different Node. - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // Requested updated configuration. 
- Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} } -func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) } -func (*UpdatedLibraryConfig) ProtoMessage() {} -func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{1} -} - -func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b) -} -func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic) -} -func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src) -} -func (m *UpdatedLibraryConfig) XXX_Size() int { - return xxx_messageInfo_UpdatedLibraryConfig.Size(m) -} -func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() { - xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo - -func (m *UpdatedLibraryConfig) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig { - if m != nil { - return m.Config - } - return nil -} - -type ExportTraceServiceRequest struct { - // This is required only in the first message on the stream or if the - // previous sent ExportTraceServiceRequest message has a different Node (e.g. - // when the same RPC is used to send Spans from multiple Applications). - Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` - // A list of Spans that belong to the last received Node. - Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` - // The resource for the spans in this message that do not have an explicit - // resource set. - // If unset, the most recently set resource in the RPC stream applies. It is - // valid to never be set within a stream, e.g. when no resource info is known. 
- Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } -func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceRequest) ProtoMessage() {} -func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{2} -} - -func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) -} -func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) -} -func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) -} -func (m *ExportTraceServiceRequest) XXX_Size() int { - return xxx_messageInfo_ExportTraceServiceRequest.Size(m) -} -func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo - -func (m *ExportTraceServiceRequest) GetNode() *v1.Node { - if m != nil { - return m.Node - } - return nil -} - -func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span { - if m != nil { - return m.Spans - } - return nil -} - -func (m *ExportTraceServiceRequest) GetResource() *v12.Resource { - if m != nil { - return m.Resource - } - return nil -} - -type ExportTraceServiceResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } -func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } -func (*ExportTraceServiceResponse) ProtoMessage() {} -func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7027f99caf7ac6a5, []int{3} -} - -func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) -} -func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) -} -func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) -} -func (m *ExportTraceServiceResponse) XXX_Size() int { - return xxx_messageInfo_ExportTraceServiceResponse.Size(m) -} -func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig") - proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig") - proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest") - proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse") -} - -func init() { - proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", 
fileDescriptor_7027f99caf7ac6a5) -} - -var fileDescriptor_7027f99caf7ac6a5 = []byte{ - // 423 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40, - 0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a, - 0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8, - 0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3, - 0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d, - 0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49, - 0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91, - 0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d, - 0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7, - 0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf, - 0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73, - 0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1, - 0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31, - 0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95, - 0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d, - 0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90, - 0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d, - 0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d, - 0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89, - 0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44, - 0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e, - 0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae, - 0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59, - 0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94, - 0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00, - 0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf, - 0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TraceServiceClient is the client API for TraceService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TraceServiceClient interface { - // After initialization, this RPC must be kept alive for the entire life of - // the application. The agent pushes configs down to applications via a - // stream. 
- Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. - Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) -} - -type traceServiceClient struct { - cc *grpc.ClientConn -} - -func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { - return &traceServiceClient{cc} -} - -func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { - stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) - if err != nil { - return nil, err - } - x := &traceServiceConfigClient{stream} - return x, nil -} - -type TraceService_ConfigClient interface { - Send(*CurrentLibraryConfig) error - Recv() (*UpdatedLibraryConfig, error) - grpc.ClientStream -} - -type traceServiceConfigClient struct { - grpc.ClientStream -} - -func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { - return x.ClientStream.SendMsg(m) -} - -func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { - m := new(UpdatedLibraryConfig) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { - stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) - if err != nil { - return nil, err - } - x := &traceServiceExportClient{stream} - return x, nil -} - -type TraceService_ExportClient interface { - Send(*ExportTraceServiceRequest) error - Recv() (*ExportTraceServiceResponse, error) - grpc.ClientStream -} - -type traceServiceExportClient struct { - grpc.ClientStream -} - -func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { - m := new(ExportTraceServiceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// TraceServiceServer is the server API for TraceService service. -type TraceServiceServer interface { - // After initialization, this RPC must be kept alive for the entire life of - // the application. The agent pushes configs down to applications via a - // stream. - Config(TraceService_ConfigServer) error - // For performance reasons, it is recommended to keep this RPC - // alive for the entire life of the application. 
- Export(TraceService_ExportServer) error -} - -func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) -} - -func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) -} - -type TraceService_ConfigServer interface { - Send(*UpdatedLibraryConfig) error - Recv() (*CurrentLibraryConfig, error) - grpc.ServerStream -} - -type traceServiceConfigServer struct { - grpc.ServerStream -} - -func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { - return x.ServerStream.SendMsg(m) -} - -func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { - m := new(CurrentLibraryConfig) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) -} - -type TraceService_ExportServer interface { - Send(*ExportTraceServiceResponse) error - Recv() (*ExportTraceServiceRequest, error) - grpc.ServerStream -} - -type traceServiceExportServer struct { - grpc.ServerStream -} - -func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { - m := new(ExportTraceServiceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _TraceService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "opencensus.proto.agent.trace.v1.TraceService", - HandlerType: (*TraceServiceServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "Config", - Handler: _TraceService_Config_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "Export", - Handler: _TraceService_Export_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", -} diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go deleted file mode 100644 index bd4b8a8..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: opencensus/proto/agent/trace/v1/trace_service.proto - -/* -Package v1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1 - -import ( - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray - -func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) { - var metadata runtime.ServerMetadata - stream, err := client.Export(ctx) - if err != nil { - grpclog.Infof("Failed to start streaming: %v", err) - return nil, metadata, err - } - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, berr - } - dec := marshaler.NewDecoder(newReader()) - handleSend := func() error { - var protoReq ExportTraceServiceRequest - err := dec.Decode(&protoReq) - if err == io.EOF { - return err - } - if err != nil { - grpclog.Infof("Failed to decode request: %v", err) - return err - } - if err := stream.Send(&protoReq); err != nil { - grpclog.Infof("Failed to send request: %v", err) - return err - } - return nil - } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Infof("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } - go func() { - for { - if err := handleSend(); err != nil { - break - } - } - if err := stream.CloseSend(); err != nil { - grpclog.Infof("Failed to terminate client stream: %v", err) - } - }() - header, err := stream.Header() - if err != nil { - grpclog.Infof("Failed to get header from client: %v", err) - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil -} - -// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterTraceServiceHandler(ctx, mux, conn) -} - -// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) -} - -// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "TraceServiceClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "TraceServiceClient" to call the correct interceptors. -func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { - - mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) -) - -var ( - forward_TraceService_Export_0 = runtime.ForwardResponseStream -) diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go deleted file mode 100644 index 53b8aa9..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go +++ /dev/null @@ -1,1126 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/metrics/v1/metrics.proto - -package v1 - -import ( - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// The kind of metric. It describes how the data is reported. -// -// A gauge is an instantaneous measurement of a value. -// -// A cumulative measurement is a value accumulated over a time interval. In -// a time series, cumulative measurements should have the same start time, -// increasing values and increasing end times, until an event resets the -// cumulative value to zero and sets a new start time for the following -// points. -type MetricDescriptor_Type int32 - -const ( - // Do not use this default value. - MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0 - // Integer gauge. The value can go both up and down. - MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1 - // Floating point gauge. The value can go both up and down. - MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2 - // Distribution gauge measurement. 
The count and sum can go both up and - // down. Recorded values are always >= 0. - // Used in scenarios like a snapshot of time the current items in a queue - // have spent there. - MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3 - // Integer cumulative measurement. The value cannot decrease, if resets - // then the start_time should also be reset. - MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4 - // Floating point cumulative measurement. The value cannot decrease, if - // resets then the start_time should also be reset. Recorded values are - // always >= 0. - MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5 - // Distribution cumulative measurement. The count and sum cannot decrease, - // if resets then the start_time should also be reset. - MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6 - // Some frameworks implemented Histograms as a summary of observations - // (usually things like request durations and response sizes). While it - // also provides a total count of observations and a sum of all observed - // values, it calculates configurable percentiles over a sliding time - // window. This is not recommended, since it cannot be aggregated. - MetricDescriptor_SUMMARY MetricDescriptor_Type = 7 -) - -var MetricDescriptor_Type_name = map[int32]string{ - 0: "UNSPECIFIED", - 1: "GAUGE_INT64", - 2: "GAUGE_DOUBLE", - 3: "GAUGE_DISTRIBUTION", - 4: "CUMULATIVE_INT64", - 5: "CUMULATIVE_DOUBLE", - 6: "CUMULATIVE_DISTRIBUTION", - 7: "SUMMARY", -} - -var MetricDescriptor_Type_value = map[string]int32{ - "UNSPECIFIED": 0, - "GAUGE_INT64": 1, - "GAUGE_DOUBLE": 2, - "GAUGE_DISTRIBUTION": 3, - "CUMULATIVE_INT64": 4, - "CUMULATIVE_DOUBLE": 5, - "CUMULATIVE_DISTRIBUTION": 6, - "SUMMARY": 7, -} - -func (x MetricDescriptor_Type) String() string { - return proto.EnumName(MetricDescriptor_Type_name, int32(x)) -} - -func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{1, 0} -} - -// Defines a Metric which has one or more timeseries. -type Metric struct { - // The descriptor of the Metric. - // TODO(issue #152): consider only sending the name of descriptor for - // optimization. - MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` - // One or more timeseries for a single metric, where each timeseries has - // one or more points. - Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"` - // The resource for the metric. If unset, it may be set to a default value - // provided for a sequence of messages in an RPC stream. 
- Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{0} -} - -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetMetricDescriptor() *MetricDescriptor { - if m != nil { - return m.MetricDescriptor - } - return nil -} - -func (m *Metric) GetTimeseries() []*TimeSeries { - if m != nil { - return m.Timeseries - } - return nil -} - -func (m *Metric) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -// Defines a metric type and its schema. -type MetricDescriptor struct { - // The metric type, including its DNS name prefix. It must be unique. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // A detailed description of the metric, which can be used in documentation. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - // The unit in which the metric value is reported. Follows the format - // described by http://unitsofmeasure.org/ucum.html. - Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` - Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"` - // The label keys associated with the metric descriptor. 
- LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } -func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } -func (*MetricDescriptor) ProtoMessage() {} -func (*MetricDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{1} -} - -func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) -} -func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) -} -func (m *MetricDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricDescriptor.Merge(m, src) -} -func (m *MetricDescriptor) XXX_Size() int { - return xxx_messageInfo_MetricDescriptor.Size(m) -} -func (m *MetricDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo - -func (m *MetricDescriptor) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *MetricDescriptor) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *MetricDescriptor) GetUnit() string { - if m != nil { - return m.Unit - } - return "" -} - -func (m *MetricDescriptor) GetType() MetricDescriptor_Type { - if m != nil { - return m.Type - } - return MetricDescriptor_UNSPECIFIED -} - -func (m *MetricDescriptor) GetLabelKeys() []*LabelKey { - if m != nil { - return m.LabelKeys - } - return nil -} - -// Defines a label key associated with a metric descriptor. -type LabelKey struct { - // The key for the label. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // A human-readable description of what this label key represents. - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelKey) Reset() { *m = LabelKey{} } -func (m *LabelKey) String() string { return proto.CompactTextString(m) } -func (*LabelKey) ProtoMessage() {} -func (*LabelKey) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{2} -} - -func (m *LabelKey) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelKey.Unmarshal(m, b) -} -func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic) -} -func (m *LabelKey) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelKey.Merge(m, src) -} -func (m *LabelKey) XXX_Size() int { - return xxx_messageInfo_LabelKey.Size(m) -} -func (m *LabelKey) XXX_DiscardUnknown() { - xxx_messageInfo_LabelKey.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelKey proto.InternalMessageInfo - -func (m *LabelKey) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *LabelKey) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -// A collection of data points that describes the time-varying values -// of a metric. -type TimeSeries struct { - // Must be present for cumulative metrics. The time when the cumulative value - // was reset to zero. Exclusive. 
The cumulative value is over the time interval - // (start_timestamp, timestamp]. If not specified, the backend can use the - // previous recorded value. - StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` - // The set of label values that uniquely identify this timeseries. Applies to - // all points. The order of label values must match that of label keys in the - // metric descriptor. - LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` - // The data points of this timeseries. Point.value type MUST match the - // MetricDescriptor.type. - Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (m *TimeSeries) String() string { return proto.CompactTextString(m) } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{3} -} - -func (m *TimeSeries) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TimeSeries.Unmarshal(m, b) -} -func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) -} -func (m *TimeSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_TimeSeries.Merge(m, src) -} -func (m *TimeSeries) XXX_Size() int { - return xxx_messageInfo_TimeSeries.Size(m) -} -func (m *TimeSeries) XXX_DiscardUnknown() { - xxx_messageInfo_TimeSeries.DiscardUnknown(m) -} - -var xxx_messageInfo_TimeSeries proto.InternalMessageInfo - -func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp { - if m != nil { - return m.StartTimestamp - } - return nil -} - -func (m *TimeSeries) GetLabelValues() []*LabelValue { - if m != nil { - return m.LabelValues - } - return nil -} - -func (m *TimeSeries) GetPoints() []*Point { - if m != nil { - return m.Points - } - return nil -} - -type LabelValue struct { - // The value for the label. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - // If false the value field is ignored and considered not set. - // This is used to differentiate a missing label from an empty string. 
- HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelValue) Reset() { *m = LabelValue{} } -func (m *LabelValue) String() string { return proto.CompactTextString(m) } -func (*LabelValue) ProtoMessage() {} -func (*LabelValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{4} -} - -func (m *LabelValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelValue.Unmarshal(m, b) -} -func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic) -} -func (m *LabelValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelValue.Merge(m, src) -} -func (m *LabelValue) XXX_Size() int { - return xxx_messageInfo_LabelValue.Size(m) -} -func (m *LabelValue) XXX_DiscardUnknown() { - xxx_messageInfo_LabelValue.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelValue proto.InternalMessageInfo - -func (m *LabelValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *LabelValue) GetHasValue() bool { - if m != nil { - return m.HasValue - } - return false -} - -// A timestamped measurement. -type Point struct { - // The moment when this point was recorded. Inclusive. - // If not specified, the timestamp will be decided by the backend. - Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // The actual point value. - // - // Types that are valid to be assigned to Value: - // *Point_Int64Value - // *Point_DoubleValue - // *Point_DistributionValue - // *Point_SummaryValue - Value isPoint_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Point) Reset() { *m = Point{} } -func (m *Point) String() string { return proto.CompactTextString(m) } -func (*Point) ProtoMessage() {} -func (*Point) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{5} -} - -func (m *Point) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Point.Unmarshal(m, b) -} -func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Point.Marshal(b, m, deterministic) -} -func (m *Point) XXX_Merge(src proto.Message) { - xxx_messageInfo_Point.Merge(m, src) -} -func (m *Point) XXX_Size() int { - return xxx_messageInfo_Point.Size(m) -} -func (m *Point) XXX_DiscardUnknown() { - xxx_messageInfo_Point.DiscardUnknown(m) -} - -var xxx_messageInfo_Point proto.InternalMessageInfo - -func (m *Point) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type isPoint_Value interface { - isPoint_Value() -} - -type Point_Int64Value struct { - Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` -} - -type Point_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -type Point_DistributionValue struct { - DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"` -} - -type Point_SummaryValue struct { - SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"` -} - -func (*Point_Int64Value) isPoint_Value() {} - -func (*Point_DoubleValue) 
isPoint_Value() {} - -func (*Point_DistributionValue) isPoint_Value() {} - -func (*Point_SummaryValue) isPoint_Value() {} - -func (m *Point) GetValue() isPoint_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *Point) GetInt64Value() int64 { - if x, ok := m.GetValue().(*Point_Int64Value); ok { - return x.Int64Value - } - return 0 -} - -func (m *Point) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*Point_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *Point) GetDistributionValue() *DistributionValue { - if x, ok := m.GetValue().(*Point_DistributionValue); ok { - return x.DistributionValue - } - return nil -} - -func (m *Point) GetSummaryValue() *SummaryValue { - if x, ok := m.GetValue().(*Point_SummaryValue); ok { - return x.SummaryValue - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Point) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Point_Int64Value)(nil), - (*Point_DoubleValue)(nil), - (*Point_DistributionValue)(nil), - (*Point_SummaryValue)(nil), - } -} - -// Distribution contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those -// values across a set of buckets. -type DistributionValue struct { - // The number of values in the population. Must be non-negative. This value - // must equal the sum of the values in bucket_counts if a histogram is - // provided. - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // The sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` - // The sum of squared deviations from the mean of the values in the - // population. For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If count is zero then this field must be zero. - SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` - // Don't change bucket boundaries within a TimeSeries if your backend doesn't - // support this. - // TODO(issue #152): consider not required to send bucket options for - // optimization. - BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` - // If the distribution does not have a histogram, then omit this field. - // If there is a histogram, then the sum of the values in the Bucket counts - // must equal the value in the count field of the distribution. 
- Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue) Reset() { *m = DistributionValue{} } -func (m *DistributionValue) String() string { return proto.CompactTextString(m) } -func (*DistributionValue) ProtoMessage() {} -func (*DistributionValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6} -} - -func (m *DistributionValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue.Unmarshal(m, b) -} -func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic) -} -func (m *DistributionValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue.Merge(m, src) -} -func (m *DistributionValue) XXX_Size() int { - return xxx_messageInfo_DistributionValue.Size(m) -} -func (m *DistributionValue) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue proto.InternalMessageInfo - -func (m *DistributionValue) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DistributionValue) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *DistributionValue) GetSumOfSquaredDeviation() float64 { - if m != nil { - return m.SumOfSquaredDeviation - } - return 0 -} - -func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions { - if m != nil { - return m.BucketOptions - } - return nil -} - -func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket { - if m != nil { - return m.Buckets - } - return nil -} - -// A Distribution may optionally contain a histogram of the values in the -// population. The bucket boundaries for that histogram are described by -// BucketOptions. -// -// If bucket_options has no type, then there is no histogram associated with -// the Distribution. 
-type DistributionValue_BucketOptions struct { - // Types that are valid to be assigned to Type: - // *DistributionValue_BucketOptions_Explicit_ - Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} } -func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_BucketOptions) ProtoMessage() {} -func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 0} -} - -func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b) -} -func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic) -} -func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src) -} -func (m *DistributionValue_BucketOptions) XXX_Size() int { - return xxx_messageInfo_DistributionValue_BucketOptions.Size(m) -} -func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo - -type isDistributionValue_BucketOptions_Type interface { - isDistributionValue_BucketOptions_Type() -} - -type DistributionValue_BucketOptions_Explicit_ struct { - Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"` -} - -func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {} - -func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type { - if m != nil { - return m.Type - } - return nil -} - -func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit { - if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok { - return x.Explicit - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*DistributionValue_BucketOptions_Explicit_)(nil), - } -} - -// Specifies a set of buckets with arbitrary upper-bounds. -// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket -// index i are: -// -// [0, bucket_bounds[i]) for i == 0 -// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1 -// [bucket_bounds[i], +infinity) for i == N-1 -type DistributionValue_BucketOptions_Explicit struct { - // The values must be strictly increasing and > 0. 
- Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_BucketOptions_Explicit) Reset() { - *m = DistributionValue_BucketOptions_Explicit{} -} -func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {} -func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0} -} - -func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int { - return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m) -} -func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo - -func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 { - if m != nil { - return m.Bounds - } - return nil -} - -type DistributionValue_Bucket struct { - // The number of values in each bucket of the histogram, as described in - // bucket_bounds. - Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` - // If the distribution does not have a histogram, then omit this field. 
- Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} } -func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_Bucket) ProtoMessage() {} -func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 1} -} - -func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b) -} -func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic) -} -func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_Bucket.Merge(m, src) -} -func (m *DistributionValue_Bucket) XXX_Size() int { - return xxx_messageInfo_DistributionValue_Bucket.Size(m) -} -func (m *DistributionValue_Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo - -func (m *DistributionValue_Bucket) GetCount() int64 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -// Exemplars are example points that may be used to annotate aggregated -// Distribution values. They are metadata that gives information about a -// particular value added to a Distribution bucket. -type DistributionValue_Exemplar struct { - // Value of the exemplar point. It determines which bucket the exemplar - // belongs to. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - // The observation (sampling) time of the above value. - Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // Contextual information about the example value. 
- Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} } -func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) } -func (*DistributionValue_Exemplar) ProtoMessage() {} -func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{6, 2} -} - -func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b) -} -func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic) -} -func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src) -} -func (m *DistributionValue_Exemplar) XXX_Size() int { - return xxx_messageInfo_DistributionValue_Exemplar.Size(m) -} -func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo - -func (m *DistributionValue_Exemplar) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -func (m *DistributionValue_Exemplar) GetAttachments() map[string]string { - if m != nil { - return m.Attachments - } - return nil -} - -// The start_timestamp only applies to the count and sum in the SummaryValue. -type SummaryValue struct { - // The total number of recorded values since start_time. Optional since - // some systems don't expose this. - Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` - // The total sum of recorded values since start_time. Optional since some - // systems don't expose this. If count is zero then this field must be zero. - // This field must be unset if the sum is not available. - Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` - // Values calculated over an arbitrary time window. 
- Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue) Reset() { *m = SummaryValue{} } -func (m *SummaryValue) String() string { return proto.CompactTextString(m) } -func (*SummaryValue) ProtoMessage() {} -func (*SummaryValue) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7} -} - -func (m *SummaryValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue.Unmarshal(m, b) -} -func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) -} -func (m *SummaryValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue.Merge(m, src) -} -func (m *SummaryValue) XXX_Size() int { - return xxx_messageInfo_SummaryValue.Size(m) -} -func (m *SummaryValue) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue proto.InternalMessageInfo - -func (m *SummaryValue) GetCount() *wrappers.Int64Value { - if m != nil { - return m.Count - } - return nil -} - -func (m *SummaryValue) GetSum() *wrappers.DoubleValue { - if m != nil { - return m.Sum - } - return nil -} - -func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -// The values in this message can be reset at arbitrary unknown times, with -// the requirement that all of them are reset at the same time. -type SummaryValue_Snapshot struct { - // The number of values in the snapshot. Optional since some systems don't - // expose this. - Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` - // The sum of values in the snapshot. Optional since some systems don't - // expose this. If count is zero then this field must be zero or not set - // (if not supported). - Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` - // A list of values at different percentiles of the distribution calculated - // from the current snapshot. The percentiles must be strictly increasing. 
- PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} } -func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) } -func (*SummaryValue_Snapshot) ProtoMessage() {} -func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7, 0} -} - -func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b) -} -func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic) -} -func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src) -} -func (m *SummaryValue_Snapshot) XXX_Size() int { - return xxx_messageInfo_SummaryValue_Snapshot.Size(m) -} -func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo - -func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { - if m != nil { - return m.Count - } - return nil -} - -func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { - if m != nil { - return m.Sum - } - return nil -} - -func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile { - if m != nil { - return m.PercentileValues - } - return nil -} - -// Represents the value at a given percentile of a distribution. -type SummaryValue_Snapshot_ValueAtPercentile struct { - // The percentile of a distribution. Must be in the interval - // (0.0, 100.0]. - Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"` - // The value at the given percentile of a distribution. 
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() { - *m = SummaryValue_Snapshot_ValueAtPercentile{} -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) } -func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {} -func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) { - return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0} -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) { - xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int { - return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m) -} -func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() { - xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m) -} - -var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo - -func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 { - if m != nil { - return m.Percentile - } - return 0 -} - -func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -func init() { - proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value) - proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric") - proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor") - proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey") - proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries") - proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue") - proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point") - proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue") - proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions") - proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit") - proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket") - proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry") - proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue") - proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot") - proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile") -} - -func init() { - 
proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a) -} - -var fileDescriptor_0ee3deb72053811a = []byte{ - // 1098 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, - 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xdb, 0xf5, 0xa8, 0xed, 0xdf, 0xda, 0xfc, 0x15, 0xc2, - 0x22, 0x20, 0x15, 0xca, 0x5a, 0x31, 0xa5, 0xad, 0x2a, 0x54, 0x14, 0xc7, 0x6e, 0x62, 0xc8, 0x87, - 0x35, 0xb6, 0x2b, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x35, 0xf8, - 0x05, 0x78, 0x04, 0xae, 0xb9, 0x45, 0x3c, 0x07, 0x57, 0x3c, 0x01, 0x4f, 0x81, 0x78, 0x03, 0xb4, - 0x33, 0xb3, 0x1f, 0x89, 0xc1, 0xd4, 0x45, 0xe2, 0xee, 0x9c, 0x33, 0xe7, 0xfc, 0xfc, 0x3b, 0x9f, - 0x5e, 0x78, 0xe4, 0x07, 0xc4, 0xb3, 0x89, 0x47, 0x23, 0xda, 0x08, 0x42, 0x9f, 0xf9, 0x0d, 0x97, - 0xb0, 0xd0, 0xb1, 0x69, 0x63, 0xb6, 0x9f, 0x88, 0x26, 0x7f, 0x40, 0x5b, 0x99, 0xab, 0xb0, 0x98, - 0xc9, 0xfb, 0x6c, 0x5f, 0x7f, 0xef, 0xd2, 0xf7, 0x2f, 0xa7, 0x44, 0x60, 0x8c, 0xa3, 0x8b, 0x06, - 0x73, 0x5c, 0x42, 0x99, 0xe5, 0x06, 0xc2, 0x57, 0xdf, 0xbe, 0xed, 0xf0, 0x6d, 0x68, 0x05, 0x01, - 0x09, 0x25, 0x96, 0xfe, 0xc9, 0x02, 0x91, 0x90, 0x50, 0x3f, 0x0a, 0x6d, 0x12, 0x33, 0x49, 0x64, - 0xe1, 0x6c, 0xfc, 0xa1, 0x40, 0xf9, 0x94, 0xff, 0x38, 0x7a, 0x0d, 0x35, 0x41, 0x63, 0x34, 0x21, - 0xd4, 0x0e, 0x9d, 0x80, 0xf9, 0x61, 0x5d, 0xd9, 0x51, 0x76, 0xd5, 0xe6, 0x9e, 0xb9, 0x84, 0xb1, - 0x29, 0xe2, 0xdb, 0x69, 0x10, 0xd6, 0xdc, 0x5b, 0x16, 0x74, 0x04, 0xc0, 0xd3, 0x20, 0xa1, 0x43, - 0x68, 0xbd, 0xb0, 0x53, 0xdc, 0x55, 0x9b, 0x1f, 0x2f, 0x05, 0x1d, 0x38, 0x2e, 0xe9, 0x73, 0x77, - 0x9c, 0x0b, 0x45, 0x2d, 0xa8, 0x24, 0x19, 0xd4, 0x8b, 0x9c, 0xdb, 0x47, 0x8b, 0x30, 0x69, 0x8e, - 0xb3, 0x7d, 0x13, 0x4b, 0x19, 0xa7, 0x71, 0xc6, 0x0f, 0x45, 0xd0, 0x6e, 0x73, 0x46, 0x08, 0x4a, - 0x9e, 0xe5, 0x12, 0x9e, 0xf0, 0x26, 0xe6, 0x32, 0xda, 0x01, 0x35, 0x29, 0x85, 0xe3, 0x7b, 0xf5, - 0x02, 0x7f, 0xca, 0x9b, 0xe2, 0xa8, 0xc8, 0x73, 0x18, 0xa7, 0xb2, 0x89, 0xb9, 0x8c, 0x5e, 0x42, - 0x89, 0xcd, 0x03, 0x52, 0x2f, 0xed, 0x28, 0xbb, 0x77, 0x9b, 0xcd, 0x95, 0x4a, 0x67, 0x0e, 0xe6, - 0x01, 0xc1, 0x3c, 0x1e, 0xb5, 0x01, 0xa6, 0xd6, 0x98, 0x4c, 0x47, 0xd7, 0x64, 0x4e, 0xeb, 0xeb, - 0xbc, 0x66, 0x1f, 0x2e, 0x45, 0x3b, 0x89, 0xdd, 0xbf, 0x22, 0x73, 0xbc, 0x39, 0x95, 0x12, 0x35, - 0x7e, 0x52, 0xa0, 0x14, 0x83, 0xa2, 0x7b, 0xa0, 0x0e, 0xcf, 0xfa, 0xbd, 0xce, 0x61, 0xf7, 0x65, - 0xb7, 0xd3, 0xd6, 0xd6, 0x62, 0xc3, 0xd1, 0xc1, 0xf0, 0xa8, 0x33, 0xea, 0x9e, 0x0d, 0x9e, 0x3c, - 0xd6, 0x14, 0xa4, 0x41, 0x55, 0x18, 0xda, 0xe7, 0xc3, 0xd6, 0x49, 0x47, 0x2b, 0xa0, 0x87, 0x80, - 0xa4, 0xa5, 0xdb, 0x1f, 0xe0, 0x6e, 0x6b, 0x38, 0xe8, 0x9e, 0x9f, 0x69, 0x45, 0x74, 0x1f, 0xb4, - 0xc3, 0xe1, 0xe9, 0xf0, 0xe4, 0x60, 0xd0, 0x7d, 0x95, 0xc4, 0x97, 0xd0, 0x03, 0xa8, 0xe5, 0xac, - 0x12, 0x64, 0x1d, 0x6d, 0xc1, 0xff, 0xf2, 0xe6, 0x3c, 0x52, 0x19, 0xa9, 0xb0, 0xd1, 0x1f, 0x9e, - 0x9e, 0x1e, 0xe0, 0xaf, 0xb5, 0x0d, 0xe3, 0x05, 0x54, 0x92, 0x14, 0x90, 0x06, 0xc5, 0x6b, 0x32, - 0x97, 0xed, 0x88, 0xc5, 0x7f, 0xee, 0x86, 0xf1, 0x9b, 0x02, 0x90, 0xcd, 0x0d, 0x3a, 0x84, 0x7b, - 0x94, 0x59, 0x21, 0x1b, 0xa5, 0x1b, 0x24, 0xc7, 0x59, 0x37, 0xc5, 0x0a, 0x99, 0xc9, 0x0a, 0xf1, - 0x69, 0xe3, 0x1e, 0xf8, 0x2e, 0x0f, 0x49, 0x75, 0xf4, 0x25, 0x54, 0x45, 0x17, 0x66, 0xd6, 0x34, - 0x7a, 0xcb, 0xd9, 0xe5, 0x49, 0xbc, 0x8a, 0xfd, 0xb1, 0x3a, 0x4d, 0x65, 0x8a, 0x9e, 0x43, 0x39, - 0xf0, 0x1d, 0x8f, 0xd1, 0x7a, 0x91, 0xa3, 0x18, 0x4b, 0x51, 0x7a, 0xb1, 0x2b, 0x96, 0x11, 0xc6, - 0x17, 0x00, 0x19, 
0x2c, 0xba, 0x0f, 0xeb, 0x9c, 0x8f, 0xac, 0x8f, 0x50, 0xd0, 0x16, 0x6c, 0x5e, - 0x59, 0x54, 0x30, 0xe5, 0xf5, 0xa9, 0xe0, 0xca, 0x95, 0x45, 0x79, 0x88, 0xf1, 0x4b, 0x01, 0xd6, - 0x39, 0x24, 0x7a, 0x06, 0x9b, 0xab, 0x54, 0x24, 0x73, 0x46, 0xef, 0x83, 0xea, 0x78, 0xec, 0xc9, - 0xe3, 0xdc, 0x4f, 0x14, 0x8f, 0xd7, 0x30, 0x70, 0xa3, 0x60, 0xf6, 0x01, 0x54, 0x27, 0x7e, 0x34, - 0x9e, 0x12, 0xe9, 0x13, 0x6f, 0x86, 0x72, 0xbc, 0x86, 0x55, 0x61, 0x15, 0x4e, 0x23, 0x40, 0x13, - 0x87, 0xb2, 0xd0, 0x19, 0x47, 0x71, 0xe3, 0xa4, 0x6b, 0x89, 0x53, 0x31, 0x97, 0x16, 0xa5, 0x9d, - 0x0b, 0xe3, 0x58, 0xc7, 0x6b, 0xb8, 0x36, 0xb9, 0x6d, 0x44, 0x3d, 0xb8, 0x43, 0x23, 0xd7, 0xb5, - 0xc2, 0xb9, 0xc4, 0x5e, 0xe7, 0xd8, 0x8f, 0x96, 0x62, 0xf7, 0x45, 0x44, 0x02, 0x5b, 0xa5, 0x39, - 0xbd, 0xb5, 0x21, 0x2b, 0x6e, 0xfc, 0x5a, 0x86, 0xda, 0x02, 0x8b, 0xb8, 0x21, 0xb6, 0x1f, 0x79, - 0x8c, 0xd7, 0xb3, 0x88, 0x85, 0x12, 0x0f, 0x31, 0x8d, 0x5c, 0x5e, 0x27, 0x05, 0xc7, 0x22, 0x7a, - 0x0a, 0x75, 0x1a, 0xb9, 0x23, 0xff, 0x62, 0x44, 0xdf, 0x44, 0x56, 0x48, 0x26, 0xa3, 0x09, 0x99, - 0x39, 0x16, 0x9f, 0x68, 0x5e, 0x2a, 0xfc, 0x80, 0x46, 0xee, 0xf9, 0x45, 0x5f, 0xbc, 0xb6, 0x93, - 0x47, 0x64, 0xc3, 0xdd, 0x71, 0x64, 0x5f, 0x13, 0x36, 0xf2, 0xf9, 0xb0, 0x53, 0x59, 0xae, 0xcf, - 0x57, 0x2b, 0x97, 0xd9, 0xe2, 0x20, 0xe7, 0x02, 0x03, 0xdf, 0x19, 0xe7, 0x55, 0x74, 0x0e, 0x1b, - 0xc2, 0x90, 0xdc, 0x9b, 0xcf, 0xde, 0x09, 0x1d, 0x27, 0x28, 0xfa, 0x8f, 0x0a, 0xdc, 0xb9, 0xf1, - 0x8b, 0xc8, 0x86, 0x0a, 0xf9, 0x2e, 0x98, 0x3a, 0xb6, 0xc3, 0xe4, 0xec, 0x75, 0xfe, 0x4d, 0x06, - 0x66, 0x47, 0x82, 0x1d, 0xaf, 0xe1, 0x14, 0x58, 0x37, 0xa0, 0x92, 0xd8, 0xd1, 0x43, 0x28, 0x8f, - 0xfd, 0xc8, 0x9b, 0xd0, 0xba, 0xb2, 0x53, 0xdc, 0x55, 0xb0, 0xd4, 0x5a, 0x65, 0x71, 0xa6, 0x75, - 0x0a, 0x65, 0x81, 0xf8, 0x37, 0x3d, 0xec, 0xc7, 0x84, 0x89, 0x1b, 0x4c, 0xad, 0x90, 0x37, 0x52, - 0x6d, 0x3e, 0x5d, 0x91, 0x70, 0x47, 0x86, 0xe3, 0x14, 0x48, 0xff, 0xbe, 0x10, 0x33, 0x14, 0xca, - 0xcd, 0x65, 0x56, 0x92, 0x65, 0xbe, 0xb1, 0xa5, 0x85, 0x55, 0xb6, 0xf4, 0x1b, 0x50, 0x2d, 0xc6, - 0x2c, 0xfb, 0xca, 0x25, 0xd9, 0xad, 0x39, 0x7e, 0x47, 0xd2, 0xe6, 0x41, 0x06, 0xd5, 0xf1, 0x58, - 0x38, 0xc7, 0x79, 0x70, 0xfd, 0x05, 0x68, 0xb7, 0x1d, 0xfe, 0xe2, 0x74, 0xa7, 0x19, 0x16, 0x72, - 0xe7, 0xea, 0x79, 0xe1, 0x99, 0x62, 0xfc, 0x5e, 0x84, 0x6a, 0x7e, 0xef, 0xd0, 0x7e, 0xbe, 0x09, - 0x6a, 0x73, 0x6b, 0x21, 0xe5, 0x6e, 0x7a, 0x6b, 0x92, 0x0e, 0x99, 0xd9, 0x96, 0xa9, 0xcd, 0xff, - 0x2f, 0x04, 0xb4, 0xb3, 0xc3, 0x23, 0x76, 0xf0, 0x0c, 0x2a, 0xd4, 0xb3, 0x02, 0x7a, 0xe5, 0x33, - 0xf9, 0x0d, 0xd1, 0x7c, 0xeb, 0xbb, 0x60, 0xf6, 0x65, 0x24, 0x4e, 0x31, 0xf4, 0x9f, 0x0b, 0x50, - 0x49, 0xcc, 0xff, 0x05, 0xff, 0x37, 0x50, 0x0b, 0x48, 0x68, 0x13, 0x8f, 0x39, 0xc9, 0x99, 0x4d, - 0xba, 0xdc, 0x5e, 0x3d, 0x11, 0x93, 0xab, 0x07, 0xac, 0x97, 0x42, 0x62, 0x2d, 0x83, 0x17, 0xff, - 0x5c, 0x7a, 0x17, 0x6a, 0x0b, 0x6e, 0x68, 0x1b, 0x20, 0x73, 0x94, 0xc3, 0x9b, 0xb3, 0xdc, 0xec, - 0x7a, 0x32, 0xd7, 0xad, 0x19, 0x6c, 0x3b, 0xfe, 0x32, 0x9a, 0xad, 0xaa, 0xf8, 0x2a, 0xa2, 0xbd, - 0xf8, 0xa1, 0xa7, 0xbc, 0x6e, 0x5f, 0x3a, 0xec, 0x2a, 0x1a, 0x9b, 0xb6, 0xef, 0x36, 0x44, 0xcc, - 0x9e, 0xe3, 0x51, 0x16, 0x46, 0xf1, 0xcc, 0xf1, 0xeb, 0xd8, 0xc8, 0xe0, 0xf6, 0xc4, 0x27, 0xef, - 0x25, 0xf1, 0xf6, 0x2e, 0xf3, 0x9f, 0xe0, 0xe3, 0x32, 0x7f, 0xf8, 0xf4, 0xcf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x8e, 0xfc, 0xd7, 0x46, 0xa8, 0x0b, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go 
b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go deleted file mode 100644 index 38faa9f..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/resource/v1/resource.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Resource information. -type Resource struct { - // Type identifier for the resource. - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Set of labels that describe the resource. - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_584700775a2fc762, []int{0} -} - -func (m *Resource) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Resource.Unmarshal(m, b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) -} -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return xxx_messageInfo_Resource.Size(m) -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var xxx_messageInfo_Resource proto.InternalMessageInfo - -func (m *Resource) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Resource) GetLabels() map[string]string { - if m != nil { - return m.Labels - } - return nil -} - -func init() { - proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") - proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") -} - -func init() { - proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) -} - -var fileDescriptor_584700775a2fc762 = []byte{ - // 234 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, - 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, - 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, - 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, - 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, - 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, - 0x6d, 0x64, 
0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, - 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, - 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, - 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf, - 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19, - 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5, - 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99, - 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go deleted file mode 100644 index 4de0535..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go +++ /dev/null @@ -1,1543 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/trace/v1/trace.proto - -package v1 - -import ( - fmt "fmt" - v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - wrappers "github.com/golang/protobuf/ptypes/wrappers" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Type of span. Can be used to specify additional relationships between spans -// in addition to a parent/child relationship. -type Span_SpanKind int32 - -const ( - // Unspecified. - Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 - // Indicates that the span covers server-side handling of an RPC or other - // remote network request. - Span_SERVER Span_SpanKind = 1 - // Indicates that the span covers the client-side wrapper around an RPC or - // other remote request. - Span_CLIENT Span_SpanKind = 2 -) - -var Span_SpanKind_name = map[int32]string{ - 0: "SPAN_KIND_UNSPECIFIED", - 1: "SERVER", - 2: "CLIENT", -} - -var Span_SpanKind_value = map[string]int32{ - "SPAN_KIND_UNSPECIFIED": 0, - "SERVER": 1, - "CLIENT": 2, -} - -func (x Span_SpanKind) String() string { - return proto.EnumName(Span_SpanKind_name, int32(x)) -} - -func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0} -} - -// Indicates whether the message was sent or received. -type Span_TimeEvent_MessageEvent_Type int32 - -const ( - // Unknown event type. - Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 - // Indicates a sent message. - Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 - // Indicates a received message. 
- Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 -) - -var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "SENT", - 2: "RECEIVED", -} - -var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "SENT": 1, - "RECEIVED": 2, -} - -func (x Span_TimeEvent_MessageEvent_Type) String() string { - return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) -} - -func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0} -} - -// The relationship of the current span relative to the linked span: child, -// parent, or unspecified. -type Span_Link_Type int32 - -const ( - // The relationship of the two spans is unknown, or known but other - // than parent-child. - Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 - // The linked span is a child of the current span. - Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 - // The linked span is a parent of the current span. - Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 -) - -var Span_Link_Type_name = map[int32]string{ - 0: "TYPE_UNSPECIFIED", - 1: "CHILD_LINKED_SPAN", - 2: "PARENT_LINKED_SPAN", -} - -var Span_Link_Type_value = map[string]int32{ - "TYPE_UNSPECIFIED": 0, - "CHILD_LINKED_SPAN": 1, - "PARENT_LINKED_SPAN": 2, -} - -func (x Span_Link_Type) String() string { - return proto.EnumName(Span_Link_Type_name, int32(x)) -} - -func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0} -} - -// A span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Spans may also be linked to other spans -// from the same or different trace. And form graphs. Often, a trace -// contains a root span that describes the end-to-end latency, and one -// or more subspans for its sub-operations. A trace can also contain -// multiple root spans, or none at all. Spans do not need to be -// contiguous - there may be gaps or overlaps between spans in a trace. -// -// The next id is 17. -// TODO(bdrutu): Add an example. -type Span struct { - // A unique identifier for a trace. All spans from the same trace share - // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes - // is considered invalid. - // - // This field is semantically required. Receiver should generate new - // random trace_id if empty or invalid trace_id was received. - // - // This field is required. - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for a span within a trace, assigned when the span - // is created. The ID is an 8-byte array. An ID with all zeroes is considered - // invalid. - // - // This field is semantically required. Receiver should generate new - // random span_id if empty or invalid span_id was received. - // - // This field is required. - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The Tracestate on the span. - Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"` - // The `span_id` of this span's parent span. If this is a root span, then this - // field must be empty. The ID is an 8-byte array. - ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` - // A description of the span's operation. 
- // - // For example, the name can be a qualified method name or a file name - // and a line number where the operation is called. A best practice is to use - // the same display name at the same call point in an application. - // This makes it easier to correlate spans in different traces. - // - // This field is semantically required to be set to non-empty string. - // When null or empty string received - receiver may use string "name" - // as a replacement. There might be smarted algorithms implemented by - // receiver to fix the empty span name. - // - // This field is required. - Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // Distinguishes between spans generated in a particular context. For example, - // two spans with the same name may be distinguished using `CLIENT` (caller) - // and `SERVER` (callee) to identify queueing latency associated with the span. - Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` - // The start time of the span. On the client side, this is the time kept by - // the local machine where the span execution starts. On the server side, this - // is the time when the server's application handler starts running. - // - // This field is semantically required. When not set on receive - - // receiver should set it to the value of end_time field if it was - // set. Or to the current time if neither was set. It is important to - // keep end_time > start_time for consistency. - // - // This field is required. - StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - // The end time of the span. On the client side, this is the time kept by - // the local machine where the span execution ends. On the server side, this - // is the time when the server application handler stops running. - // - // This field is semantically required. When not set on receive - - // receiver should set it to start_time value. It is important to - // keep end_time > start_time for consistency. - // - // This field is required. - EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` - // A set of attributes on the span. - Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` - // A stack trace captured at the start of the span. - StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` - // The included time events. - TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` - // The included links. - Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` - // An optional final status for this span. Semantically when Status - // wasn't set it is means span ended without errors and assume - // Status.Ok (code = 0). - Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` - // An optional resource that is associated with this span. If not set, this span - // should be part of a batch that does include the resource information, unless resource - // information is unknown. - Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"` - // A highly recommended but not required flag that identifies when a - // trace crosses a process boundary. 
True when the parent_span belongs - // to the same process as the current span. This flag is most commonly - // used to indicate the need to adjust time as clocks in different - // processes may not be synchronized. - SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` - // An optional number of child spans that were generated while this span - // was active. If set, allows an implementation to detect missing child spans. - ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span) Reset() { *m = Span{} } -func (m *Span) String() string { return proto.CompactTextString(m) } -func (*Span) ProtoMessage() {} -func (*Span) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0} -} - -func (m *Span) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span.Unmarshal(m, b) -} -func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span.Marshal(b, m, deterministic) -} -func (m *Span) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span.Merge(m, src) -} -func (m *Span) XXX_Size() int { - return xxx_messageInfo_Span.Size(m) -} -func (m *Span) XXX_DiscardUnknown() { - xxx_messageInfo_Span.DiscardUnknown(m) -} - -var xxx_messageInfo_Span proto.InternalMessageInfo - -func (m *Span) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *Span) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *Span) GetTracestate() *Span_Tracestate { - if m != nil { - return m.Tracestate - } - return nil -} - -func (m *Span) GetParentSpanId() []byte { - if m != nil { - return m.ParentSpanId - } - return nil -} - -func (m *Span) GetName() *TruncatableString { - if m != nil { - return m.Name - } - return nil -} - -func (m *Span) GetKind() Span_SpanKind { - if m != nil { - return m.Kind - } - return Span_SPAN_KIND_UNSPECIFIED -} - -func (m *Span) GetStartTime() *timestamp.Timestamp { - if m != nil { - return m.StartTime - } - return nil -} - -func (m *Span) GetEndTime() *timestamp.Timestamp { - if m != nil { - return m.EndTime - } - return nil -} - -func (m *Span) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Span) GetStackTrace() *StackTrace { - if m != nil { - return m.StackTrace - } - return nil -} - -func (m *Span) GetTimeEvents() *Span_TimeEvents { - if m != nil { - return m.TimeEvents - } - return nil -} - -func (m *Span) GetLinks() *Span_Links { - if m != nil { - return m.Links - } - return nil -} - -func (m *Span) GetStatus() *Status { - if m != nil { - return m.Status - } - return nil -} - -func (m *Span) GetResource() *v1.Resource { - if m != nil { - return m.Resource - } - return nil -} - -func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { - if m != nil { - return m.SameProcessAsParentSpan - } - return nil -} - -func (m *Span) GetChildSpanCount() *wrappers.UInt32Value { - if m != nil { - return m.ChildSpanCount - } - return nil -} - -// This field conveys information about request position in multiple distributed tracing graphs. -// It is a list of Tracestate.Entry with a maximum of 32 members in the list. 
-// -// See the https://github.com/w3c/distributed-tracing for more details about this field. -type Span_Tracestate struct { - // A list of entries that represent the Tracestate. - Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} } -func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) } -func (*Span_Tracestate) ProtoMessage() {} -func (*Span_Tracestate) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0} -} - -func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b) -} -func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic) -} -func (m *Span_Tracestate) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Tracestate.Merge(m, src) -} -func (m *Span_Tracestate) XXX_Size() int { - return xxx_messageInfo_Span_Tracestate.Size(m) -} -func (m *Span_Tracestate) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Tracestate.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo - -func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry { - if m != nil { - return m.Entries - } - return nil -} - -type Span_Tracestate_Entry struct { - // The key must begin with a lowercase letter, and can only contain - // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes - // '-', asterisks '*', and forward slashes '/'. - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // The value is opaque string up to 256 characters printable ASCII - // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='. - // Note that this also excludes tabs, newlines, carriage returns, etc. - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} } -func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) } -func (*Span_Tracestate_Entry) ProtoMessage() {} -func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0} -} - -func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b) -} -func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic) -} -func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src) -} -func (m *Span_Tracestate_Entry) XXX_Size() int { - return xxx_messageInfo_Span_Tracestate_Entry.Size(m) -} -func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo - -func (m *Span_Tracestate_Entry) GetKey() string { - if m != nil { - return m.Key - } - return "" -} - -func (m *Span_Tracestate_Entry) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// A set of attributes, each with a key and a value. 
-type Span_Attributes struct { - // The set of attributes. The value can be a string, an integer, a double - // or the Boolean values `true` or `false`. Note, global attributes like - // server name can be set as tags using resource API. Examples of attributes: - // - // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" - // "/http/server_latency": 300 - // "abc.com/myattribute": true - // "abc.com/score": 10.239 - AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // The number of attributes that were discarded. Attributes can be discarded - // because their keys are too long or because there are too many attributes. - // If this value is 0, then no attributes were dropped. - DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } -func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } -func (*Span_Attributes) ProtoMessage() {} -func (*Span_Attributes) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 1} -} - -func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) -} -func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) -} -func (m *Span_Attributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Attributes.Merge(m, src) -} -func (m *Span_Attributes) XXX_Size() int { - return xxx_messageInfo_Span_Attributes.Size(m) -} -func (m *Span_Attributes) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Attributes.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo - -func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { - if m != nil { - return m.AttributeMap - } - return nil -} - -func (m *Span_Attributes) GetDroppedAttributesCount() int32 { - if m != nil { - return m.DroppedAttributesCount - } - return 0 -} - -// A time-stamped annotation or message event in the Span. -type Span_TimeEvent struct { - // The time the event occurred. - Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` - // A `TimeEvent` can contain either an `Annotation` object or a - // `MessageEvent` object, but not both. 
- // - // Types that are valid to be assigned to Value: - // *Span_TimeEvent_Annotation_ - // *Span_TimeEvent_MessageEvent_ - Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } -func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent) ProtoMessage() {} -func (*Span_TimeEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2} -} - -func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) -} -func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent.Merge(m, src) -} -func (m *Span_TimeEvent) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent.Size(m) -} -func (m *Span_TimeEvent) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo - -func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { - if m != nil { - return m.Time - } - return nil -} - -type isSpan_TimeEvent_Value interface { - isSpan_TimeEvent_Value() -} - -type Span_TimeEvent_Annotation_ struct { - Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` -} - -type Span_TimeEvent_MessageEvent_ struct { - MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` -} - -func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} - -func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} - -func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { - if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { - return x.Annotation - } - return nil -} - -func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { - if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { - return x.MessageEvent - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Span_TimeEvent_Annotation_)(nil), - (*Span_TimeEvent_MessageEvent_)(nil), - } -} - -// A text annotation with a set of attributes. -type Span_TimeEvent_Annotation struct { - // A user-supplied message describing the event. - Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // A set of attributes on the annotation. 
- Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } -func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent_Annotation) ProtoMessage() {} -func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0} -} - -func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) -} -func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) -} -func (m *Span_TimeEvent_Annotation) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) -} -func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo - -func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { - if m != nil { - return m.Description - } - return nil -} - -func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// An event describing a message sent/received between Spans. -type Span_TimeEvent_MessageEvent struct { - // The type of MessageEvent. Indicates whether the message was sent or - // received. - Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` - // An identifier for the MessageEvent's message that can be used to match - // SENT and RECEIVED MessageEvents. For example, this field could - // represent a sequence ID for a streaming RPC. It is recommended to be - // unique within a Span. - Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` - // The number of uncompressed bytes sent or received. - UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"` - // The number of compressed bytes sent or received. If zero, assumed to - // be the same size as uncompressed. 
- CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } -func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} -func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1} -} - -func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) -} -func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) -} -func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo - -func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { - if m != nil { - return m.Type - } - return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED -} - -func (m *Span_TimeEvent_MessageEvent) GetId() uint64 { - if m != nil { - return m.Id - } - return 0 -} - -func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 { - if m != nil { - return m.UncompressedSize - } - return 0 -} - -func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 { - if m != nil { - return m.CompressedSize - } - return 0 -} - -// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation -// on the span, consisting of either user-supplied key-value pairs, or -// details of a message sent/received between Spans. -type Span_TimeEvents struct { - // A collection of `TimeEvent`s. - TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` - // The number of dropped annotations in all the included time events. - // If the value is 0, then no annotations were dropped. - DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` - // The number of dropped message events in all the included time events. - // If the value is 0, then no message events were dropped. 
- DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } -func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } -func (*Span_TimeEvents) ProtoMessage() {} -func (*Span_TimeEvents) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 3} -} - -func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) -} -func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) -} -func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_TimeEvents.Merge(m, src) -} -func (m *Span_TimeEvents) XXX_Size() int { - return xxx_messageInfo_Span_TimeEvents.Size(m) -} -func (m *Span_TimeEvents) XXX_DiscardUnknown() { - xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo - -func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { - if m != nil { - return m.TimeEvent - } - return nil -} - -func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { - if m != nil { - return m.DroppedAnnotationsCount - } - return 0 -} - -func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { - if m != nil { - return m.DroppedMessageEventsCount - } - return 0 -} - -// A pointer from the current span to another span in the same trace or in a -// different trace. For example, this can be used in batching operations, -// where a single batch handler processes multiple requests from different -// traces or when the handler receives a request from a different project. -type Span_Link struct { - // A unique identifier of a trace that this linked span is part of. The ID is a - // 16-byte array. - TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - // A unique identifier for the linked span. The ID is an 8-byte array. - SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - // The relationship of the current span relative to the linked span. - Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` - // A set of attributes on the link. 
- Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Link) Reset() { *m = Span_Link{} } -func (m *Span_Link) String() string { return proto.CompactTextString(m) } -func (*Span_Link) ProtoMessage() {} -func (*Span_Link) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 4} -} - -func (m *Span_Link) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Link.Unmarshal(m, b) -} -func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) -} -func (m *Span_Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Link.Merge(m, src) -} -func (m *Span_Link) XXX_Size() int { - return xxx_messageInfo_Span_Link.Size(m) -} -func (m *Span_Link) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Link proto.InternalMessageInfo - -func (m *Span_Link) GetTraceId() []byte { - if m != nil { - return m.TraceId - } - return nil -} - -func (m *Span_Link) GetSpanId() []byte { - if m != nil { - return m.SpanId - } - return nil -} - -func (m *Span_Link) GetType() Span_Link_Type { - if m != nil { - return m.Type - } - return Span_Link_TYPE_UNSPECIFIED -} - -func (m *Span_Link) GetAttributes() *Span_Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// A collection of links, which are references from this span to a span -// in the same or different trace. -type Span_Links struct { - // A collection of links. - Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` - // The number of dropped links after the maximum size was enforced. If - // this value is 0, then no links were dropped. - DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Span_Links) Reset() { *m = Span_Links{} } -func (m *Span_Links) String() string { return proto.CompactTextString(m) } -func (*Span_Links) ProtoMessage() {} -func (*Span_Links) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{0, 5} -} - -func (m *Span_Links) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Span_Links.Unmarshal(m, b) -} -func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) -} -func (m *Span_Links) XXX_Merge(src proto.Message) { - xxx_messageInfo_Span_Links.Merge(m, src) -} -func (m *Span_Links) XXX_Size() int { - return xxx_messageInfo_Span_Links.Size(m) -} -func (m *Span_Links) XXX_DiscardUnknown() { - xxx_messageInfo_Span_Links.DiscardUnknown(m) -} - -var xxx_messageInfo_Span_Links proto.InternalMessageInfo - -func (m *Span_Links) GetLink() []*Span_Link { - if m != nil { - return m.Link - } - return nil -} - -func (m *Span_Links) GetDroppedLinksCount() int32 { - if m != nil { - return m.DroppedLinksCount - } - return 0 -} - -// The `Status` type defines a logical error model that is suitable for different -// programming environments, including REST APIs and RPC APIs. 
This proto's fields -// are a subset of those of -// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), -// which is used by [gRPC](https://github.com/grpc). -type Status struct { - // The status code. This is optional field. It is safe to assume 0 (OK) - // when not set. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // A developer-facing error message, which should be in English. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{1} -} - -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -// The value of an Attribute. -type AttributeValue struct { - // The type of the value. - // - // Types that are valid to be assigned to Value: - // *AttributeValue_StringValue - // *AttributeValue_IntValue - // *AttributeValue_BoolValue - // *AttributeValue_DoubleValue - Value isAttributeValue_Value `protobuf_oneof:"value"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AttributeValue) Reset() { *m = AttributeValue{} } -func (m *AttributeValue) String() string { return proto.CompactTextString(m) } -func (*AttributeValue) ProtoMessage() {} -func (*AttributeValue) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{2} -} - -func (m *AttributeValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AttributeValue.Unmarshal(m, b) -} -func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) -} -func (m *AttributeValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributeValue.Merge(m, src) -} -func (m *AttributeValue) XXX_Size() int { - return xxx_messageInfo_AttributeValue.Size(m) -} -func (m *AttributeValue) XXX_DiscardUnknown() { - xxx_messageInfo_AttributeValue.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributeValue proto.InternalMessageInfo - -type isAttributeValue_Value interface { - isAttributeValue_Value() -} - -type AttributeValue_StringValue struct { - StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type AttributeValue_IntValue struct { - IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` -} - -type AttributeValue_BoolValue struct { - BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type 
AttributeValue_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` -} - -func (*AttributeValue_StringValue) isAttributeValue_Value() {} - -func (*AttributeValue_IntValue) isAttributeValue_Value() {} - -func (*AttributeValue_BoolValue) isAttributeValue_Value() {} - -func (*AttributeValue_DoubleValue) isAttributeValue_Value() {} - -func (m *AttributeValue) GetValue() isAttributeValue_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *AttributeValue) GetStringValue() *TruncatableString { - if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { - return x.StringValue - } - return nil -} - -func (m *AttributeValue) GetIntValue() int64 { - if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { - return x.IntValue - } - return 0 -} - -func (m *AttributeValue) GetBoolValue() bool { - if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *AttributeValue) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*AttributeValue) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*AttributeValue_StringValue)(nil), - (*AttributeValue_IntValue)(nil), - (*AttributeValue_BoolValue)(nil), - (*AttributeValue_DoubleValue)(nil), - } -} - -// The call stack which originated this span. -type StackTrace struct { - // Stack frames in this stack trace. - StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` - // The hash ID is used to conserve network bandwidth for duplicate - // stack traces within a single trace. - // - // Often multiple spans will have identical stack traces. - // The first occurrence of a stack trace should contain both - // `stack_frames` and a value in `stack_trace_hash_id`. - // - // Subsequent spans within the same request can refer - // to that stack trace by setting only `stack_trace_hash_id`. - // - // TODO: describe how to deal with the case where stack_trace_hash_id is - // zero because it was not set. 
- StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace) Reset() { *m = StackTrace{} } -func (m *StackTrace) String() string { return proto.CompactTextString(m) } -func (*StackTrace) ProtoMessage() {} -func (*StackTrace) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3} -} - -func (m *StackTrace) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace.Unmarshal(m, b) -} -func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) -} -func (m *StackTrace) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace.Merge(m, src) -} -func (m *StackTrace) XXX_Size() int { - return xxx_messageInfo_StackTrace.Size(m) -} -func (m *StackTrace) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace proto.InternalMessageInfo - -func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { - if m != nil { - return m.StackFrames - } - return nil -} - -func (m *StackTrace) GetStackTraceHashId() uint64 { - if m != nil { - return m.StackTraceHashId - } - return 0 -} - -// A single stack frame in a stack trace. -type StackTrace_StackFrame struct { - // The fully-qualified name that uniquely identifies the function or - // method that is active in this frame. - FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` - // An un-mangled function name, if `function_name` is - // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can - // be fully qualified. - OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` - // The name of the source file where the function call appears. - FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` - // The line number in `file_name` where the function call appears. - LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` - // The column number where the function call appears, if available. - // This is important in JavaScript because of its anonymous functions. - ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` - // The binary module from where the code was loaded. - LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` - // The version of the deployed source code. 
- SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } -func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } -func (*StackTrace_StackFrame) ProtoMessage() {} -func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3, 0} -} - -func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) -} -func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) -} -func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) -} -func (m *StackTrace_StackFrame) XXX_Size() int { - return xxx_messageInfo_StackTrace_StackFrame.Size(m) -} -func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo - -func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { - if m != nil { - return m.FunctionName - } - return nil -} - -func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { - if m != nil { - return m.OriginalFunctionName - } - return nil -} - -func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { - if m != nil { - return m.FileName - } - return nil -} - -func (m *StackTrace_StackFrame) GetLineNumber() int64 { - if m != nil { - return m.LineNumber - } - return 0 -} - -func (m *StackTrace_StackFrame) GetColumnNumber() int64 { - if m != nil { - return m.ColumnNumber - } - return 0 -} - -func (m *StackTrace_StackFrame) GetLoadModule() *Module { - if m != nil { - return m.LoadModule - } - return nil -} - -func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { - if m != nil { - return m.SourceVersion - } - return nil -} - -// A collection of stack frames, which can be truncated. -type StackTrace_StackFrames struct { - // Stack frames in this call stack. - Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` - // The number of stack frames that were dropped because there - // were too many stack frames. - // If this value is 0, then no stack frames were dropped. 
- DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } -func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } -func (*StackTrace_StackFrames) ProtoMessage() {} -func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{3, 1} -} - -func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) -} -func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) -} -func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { - xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) -} -func (m *StackTrace_StackFrames) XXX_Size() int { - return xxx_messageInfo_StackTrace_StackFrames.Size(m) -} -func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { - xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) -} - -var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo - -func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { - if m != nil { - return m.Frame - } - return nil -} - -func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { - if m != nil { - return m.DroppedFramesCount - } - return 0 -} - -// A description of a binary module. -type Module struct { - // TODO: document the meaning of this field. - // For example: main binary, kernel modules, and dynamic libraries - // such as libc.so, sharedlib.so. - Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` - // A unique identifier for the module, usually a hash of its - // contents. - BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Module) Reset() { *m = Module{} } -func (m *Module) String() string { return proto.CompactTextString(m) } -func (*Module) ProtoMessage() {} -func (*Module) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{4} -} - -func (m *Module) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Module.Unmarshal(m, b) -} -func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Module.Marshal(b, m, deterministic) -} -func (m *Module) XXX_Merge(src proto.Message) { - xxx_messageInfo_Module.Merge(m, src) -} -func (m *Module) XXX_Size() int { - return xxx_messageInfo_Module.Size(m) -} -func (m *Module) XXX_DiscardUnknown() { - xxx_messageInfo_Module.DiscardUnknown(m) -} - -var xxx_messageInfo_Module proto.InternalMessageInfo - -func (m *Module) GetModule() *TruncatableString { - if m != nil { - return m.Module - } - return nil -} - -func (m *Module) GetBuildId() *TruncatableString { - if m != nil { - return m.BuildId - } - return nil -} - -// A string that might be shortened to a specified length. -type TruncatableString struct { - // The shortened string. For example, if the original string was 500 bytes long and - // the limit of the string was 128 bytes, then this value contains the first 128 - // bytes of the 500-byte string. 
Note that truncation always happens on a - // character boundary, to ensure that a truncated string is still valid UTF-8. - // Because it may contain multi-byte characters, the size of the truncated string - // may be less than the truncation limit. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - // The number of bytes removed from the original string. If this - // value is 0, then the string was not shortened. - TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TruncatableString) Reset() { *m = TruncatableString{} } -func (m *TruncatableString) String() string { return proto.CompactTextString(m) } -func (*TruncatableString) ProtoMessage() {} -func (*TruncatableString) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea38bbb821bf584, []int{5} -} - -func (m *TruncatableString) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TruncatableString.Unmarshal(m, b) -} -func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) -} -func (m *TruncatableString) XXX_Merge(src proto.Message) { - xxx_messageInfo_TruncatableString.Merge(m, src) -} -func (m *TruncatableString) XXX_Size() int { - return xxx_messageInfo_TruncatableString.Size(m) -} -func (m *TruncatableString) XXX_DiscardUnknown() { - xxx_messageInfo_TruncatableString.DiscardUnknown(m) -} - -var xxx_messageInfo_TruncatableString proto.InternalMessageInfo - -func (m *TruncatableString) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *TruncatableString) GetTruncatedByteCount() int32 { - if m != nil { - return m.TruncatedByteCount - } - return 0 -} - -func init() { - proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) - proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) - proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) - proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span") - proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate") - proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry") - proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes") - proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry") - proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent") - proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation") - proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent") - proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents") - proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link") - proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links") - proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status") - proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue") - 
proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace") - proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame") - proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames") - proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module") - proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString") -} - -func init() { - proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584) -} - -var fileDescriptor_8ea38bbb821bf584 = []byte{ - // 1557 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xeb, 0x52, 0x1b, 0x47, - 0x16, 0x66, 0x74, 0xd7, 0x91, 0x90, 0x45, 0x1b, 0xdb, 0x83, 0xd6, 0xbb, 0x66, 0x65, 0x7b, 0x17, - 0xaf, 0x17, 0x61, 0xb0, 0xd7, 0xe5, 0x6b, 0x79, 0x11, 0x88, 0x48, 0x06, 0x2b, 0x72, 0x4b, 0xa6, - 0x72, 0xa9, 0xd4, 0xd4, 0x48, 0xd3, 0x88, 0x09, 0x52, 0xcf, 0x64, 0xa6, 0x87, 0x14, 0x7e, 0x81, - 0x54, 0x2a, 0xff, 0x52, 0x95, 0xca, 0x0b, 0xe4, 0x47, 0x5e, 0x24, 0x0f, 0x90, 0xca, 0x73, 0xe4, - 0x09, 0xf2, 0x27, 0xd5, 0xdd, 0x73, 0x13, 0xd8, 0xa0, 0xc8, 0x7f, 0xa8, 0x9e, 0xee, 0xf3, 0x7d, - 0x7d, 0x4e, 0x9f, 0x2b, 0x82, 0xdb, 0x96, 0x4d, 0xe8, 0x80, 0x50, 0xd7, 0x73, 0xd7, 0x6c, 0xc7, - 0x62, 0xd6, 0x1a, 0x73, 0xf4, 0x01, 0x59, 0x3b, 0x5e, 0x97, 0x8b, 0x9a, 0xd8, 0x44, 0x4b, 0x91, - 0x98, 0xdc, 0xa9, 0xc9, 0xd3, 0xe3, 0xf5, 0xca, 0xdd, 0x33, 0x0c, 0x0e, 0x71, 0x2d, 0xcf, 0x91, - 0x24, 0xc1, 0x5a, 0xa2, 0x2a, 0x37, 0x86, 0x96, 0x35, 0x1c, 0x11, 0x29, 0xd8, 0xf7, 0x0e, 0xd6, - 0x98, 0x39, 0x26, 0x2e, 0xd3, 0xc7, 0xb6, 0x2f, 0xf0, 0x8f, 0xd3, 0x02, 0x5f, 0x3b, 0xba, 0x6d, - 0x13, 0xc7, 0xbf, 0xb6, 0xfa, 0xcb, 0x15, 0x48, 0x75, 0x6d, 0x9d, 0xa2, 0x25, 0xc8, 0x09, 0x15, - 0x34, 0xd3, 0x50, 0x95, 0x65, 0x65, 0xa5, 0x88, 0xb3, 0xe2, 0xbb, 0x65, 0xa0, 0x6b, 0x90, 0x75, - 0x6d, 0x9d, 0xf2, 0x93, 0x84, 0x38, 0xc9, 0xf0, 0xcf, 0x96, 0x81, 0x5e, 0x02, 0x08, 0x19, 0x97, - 0xe9, 0x8c, 0xa8, 0x97, 0x96, 0x95, 0x95, 0xc2, 0xc6, 0x7f, 0x6a, 0xef, 0x35, 0xad, 0xc6, 0x2f, - 0xaa, 0xf5, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x2d, 0x28, 0xd9, 0xba, 0x43, 0x28, 0xd3, 0x82, 0xbb, - 0x92, 0xe2, 0xae, 0xa2, 0xdc, 0xed, 0xca, 0x1b, 0xff, 0x0f, 0x29, 0xaa, 0x8f, 0x89, 0x9a, 0x12, - 0x77, 0xfd, 0xf7, 0x9c, 0xbb, 0x7a, 0x8e, 0x47, 0x07, 0x3a, 0xd3, 0xfb, 0x23, 0xd2, 0x65, 0x8e, - 0x49, 0x87, 0x58, 0x20, 0xd1, 0x33, 0x48, 0x1d, 0x99, 0xd4, 0x50, 0x4b, 0xcb, 0xca, 0x4a, 0x69, - 0x63, 0xe5, 0x22, 0x6d, 0xf9, 0x9f, 0x5d, 0x93, 0x1a, 0x58, 0xa0, 0xd0, 0x63, 0x00, 0x97, 0xe9, - 0x0e, 0xd3, 0xf8, 0x3b, 0xab, 0x69, 0xa1, 0x45, 0xa5, 0x26, 0xdf, 0xb8, 0x16, 0xbc, 0x71, 0xad, - 0x17, 0x38, 0x01, 0xe7, 0x85, 0x34, 0xff, 0x46, 0xff, 0x83, 0x1c, 0xa1, 0x86, 0x04, 0x66, 0x2e, - 0x04, 0x66, 0x09, 0x35, 0x04, 0xec, 0x25, 0x80, 0xce, 0x98, 0x63, 0xf6, 0x3d, 0x46, 0x5c, 0x35, - 0x3b, 0xdd, 0x1b, 0x6f, 0x86, 0x08, 0x1c, 0x43, 0xa3, 0x1d, 0x28, 0xb8, 0x4c, 0x1f, 0x1c, 0x69, - 0x42, 0x5a, 0xcd, 0x09, 0xb2, 0xdb, 0xe7, 0x91, 0x71, 0x69, 0xe1, 0x30, 0x0c, 0x6e, 0xb8, 0x46, - 0xbb, 0x50, 0xe0, 0x66, 0x68, 0xe4, 0x98, 0x50, 0xe6, 0xaa, 0xf9, 0x29, 0x1d, 0x6f, 0x8e, 0x49, - 0x43, 0x20, 0x30, 0xb0, 0x70, 0x8d, 0x9e, 0x42, 0x7a, 0x64, 0xd2, 0x23, 0x57, 0x85, 0x8b, 0xd5, - 0xe1, 0x34, 0x7b, 0x5c, 0x18, 0x4b, 0x0c, 0x7a, 0x0c, 0x19, 0x1e, 0x3e, 0x9e, 0xab, 0x16, 0x04, - 0xfa, 0x9f, 0xe7, 0x1b, 0xc3, 0x3c, 0x17, 0xfb, 0x00, 0x54, 0x87, 0x5c, 0x90, 0x4c, 0x6a, 0x59, - 0x80, 0xff, 0x75, 0x16, 0x1c, 0xa6, 
0xdb, 0xf1, 0x7a, 0x0d, 0xfb, 0x6b, 0x1c, 0xe2, 0xd0, 0x27, - 0xf0, 0x37, 0x57, 0x1f, 0x13, 0xcd, 0x76, 0xac, 0x01, 0x71, 0x5d, 0x4d, 0x77, 0xb5, 0x58, 0x10, - 0xab, 0xc5, 0xf7, 0xb8, 0xb9, 0x6e, 0x59, 0xa3, 0x7d, 0x7d, 0xe4, 0x11, 0x7c, 0x8d, 0xc3, 0x3b, - 0x12, 0xbd, 0xe9, 0x76, 0xc2, 0x50, 0x47, 0x3b, 0x50, 0x1e, 0x1c, 0x9a, 0x23, 0x43, 0x66, 0xc3, - 0xc0, 0xf2, 0x28, 0x53, 0xe7, 0x05, 0xdd, 0xf5, 0x33, 0x74, 0x6f, 0x5a, 0x94, 0xdd, 0xdf, 0x90, - 0x84, 0x25, 0x81, 0xe2, 0x14, 0x5b, 0x1c, 0x53, 0xf9, 0x56, 0x01, 0x88, 0x32, 0x0e, 0xbd, 0x84, - 0x2c, 0xa1, 0xcc, 0x31, 0x89, 0xab, 0x2a, 0xcb, 0xc9, 0x95, 0xc2, 0xc6, 0xbd, 0xe9, 0xd3, 0xb5, - 0xd6, 0xa0, 0xcc, 0x39, 0xc1, 0x01, 0x41, 0x65, 0x0d, 0xd2, 0x62, 0x07, 0x95, 0x21, 0x79, 0x44, - 0x4e, 0x44, 0xd5, 0xc8, 0x63, 0xbe, 0x44, 0x8b, 0x90, 0x3e, 0xe6, 0xea, 0x88, 0x7a, 0x91, 0xc7, - 0xf2, 0xa3, 0xf2, 0x43, 0x02, 0x20, 0x8a, 0x4c, 0xa4, 0xc3, 0x7c, 0x18, 0x9b, 0xda, 0x58, 0xb7, - 0x7d, 0x8d, 0x9e, 0x4d, 0x1f, 0xdc, 0xd1, 0xf2, 0x95, 0x6e, 0x4b, 0xed, 0x8a, 0x7a, 0x6c, 0x0b, - 0x3d, 0x02, 0xd5, 0x70, 0x2c, 0xdb, 0x26, 0x86, 0x16, 0xa5, 0x81, 0xff, 0x9a, 0x5c, 0xb5, 0x34, - 0xbe, 0xea, 0x9f, 0x47, 0xa4, 0xf2, 0xdd, 0xbe, 0x84, 0x85, 0x33, 0xe4, 0xef, 0x30, 0xf4, 0x45, - 0xdc, 0xd0, 0xc2, 0xc6, 0x9d, 0x73, 0x74, 0x0f, 0xe9, 0xa4, 0xa3, 0x24, 0xee, 0x49, 0xe2, 0x91, - 0x52, 0xf9, 0x29, 0x0d, 0xf9, 0x30, 0x39, 0x50, 0x0d, 0x52, 0xa2, 0x46, 0x28, 0x17, 0xd6, 0x08, - 0x21, 0x87, 0xf6, 0x01, 0x74, 0x4a, 0x2d, 0xa6, 0x33, 0xd3, 0xa2, 0xbe, 0x1e, 0x0f, 0xa6, 0xce, - 0xc5, 0xda, 0x66, 0x88, 0x6d, 0xce, 0xe1, 0x18, 0x13, 0xfa, 0x02, 0xe6, 0xc7, 0xc4, 0x75, 0xf5, - 0xa1, 0x9f, 0xe7, 0xa2, 0x1e, 0x17, 0x36, 0x1e, 0x4e, 0x4f, 0xfd, 0x4a, 0xc2, 0xc5, 0x47, 0x73, - 0x0e, 0x17, 0xc7, 0xb1, 0xef, 0xca, 0xcf, 0x0a, 0x40, 0x74, 0x37, 0x6a, 0x43, 0xc1, 0x20, 0xee, - 0xc0, 0x31, 0x6d, 0x61, 0x86, 0x32, 0x43, 0x7d, 0x8f, 0x13, 0x9c, 0x2a, 0x9b, 0x89, 0x0f, 0x29, - 0x9b, 0x95, 0x3f, 0x14, 0x28, 0xc6, 0x6d, 0x41, 0x1f, 0x43, 0x8a, 0x9d, 0xd8, 0xd2, 0x45, 0xa5, - 0x8d, 0xa7, 0xb3, 0xbd, 0x48, 0xad, 0x77, 0x62, 0x13, 0x2c, 0x88, 0x50, 0x09, 0x12, 0x7e, 0x73, - 0x4d, 0xe1, 0x84, 0x69, 0xa0, 0xbb, 0xb0, 0xe0, 0xd1, 0x81, 0x35, 0xb6, 0x1d, 0xe2, 0xba, 0xc4, - 0xd0, 0x5c, 0xf3, 0x2d, 0x11, 0xef, 0x9f, 0xc2, 0xe5, 0xf8, 0x41, 0xd7, 0x7c, 0x4b, 0xd0, 0xbf, - 0xe1, 0xd2, 0x69, 0xd1, 0x94, 0x10, 0x2d, 0x4d, 0x0a, 0x56, 0x1f, 0x40, 0x8a, 0xdf, 0x89, 0x16, - 0xa1, 0xdc, 0xfb, 0xb4, 0xd3, 0xd0, 0xde, 0xb4, 0xbb, 0x9d, 0xc6, 0x56, 0x6b, 0xa7, 0xd5, 0xd8, - 0x2e, 0xcf, 0xa1, 0x1c, 0xa4, 0xba, 0x8d, 0x76, 0xaf, 0xac, 0xa0, 0x22, 0xe4, 0x70, 0x63, 0xab, - 0xd1, 0xda, 0x6f, 0x6c, 0x97, 0x13, 0xf5, 0xac, 0x1f, 0xe2, 0x95, 0xdf, 0x78, 0x29, 0x89, 0xea, - 0x76, 0x13, 0x20, 0x6a, 0x02, 0x7e, 0xee, 0xde, 0x99, 0xfa, 0x29, 0x70, 0x3e, 0x6c, 0x01, 0xe8, - 0x09, 0x2c, 0x85, 0x59, 0x1a, 0x46, 0xc4, 0x64, 0x9a, 0x5e, 0x0b, 0xd2, 0x34, 0x3a, 0x17, 0x79, - 0x8a, 0x5e, 0xc0, 0xf5, 0x00, 0x3b, 0x11, 0xad, 0x01, 0x3c, 0x29, 0xe0, 0x01, 0x7f, 0xfc, 0xfd, - 0xfd, 0x44, 0xff, 0x3e, 0x01, 0x29, 0xde, 0x52, 0x66, 0x1a, 0x80, 0x9e, 0xfb, 0x81, 0x90, 0x14, - 0x81, 0x70, 0x67, 0x9a, 0xd6, 0x15, 0x77, 0xfb, 0x64, 0x90, 0xa6, 0x3e, 0x24, 0x48, 0xab, 0xbb, - 0xe7, 0x3a, 0xf7, 0x0a, 0x2c, 0x6c, 0x35, 0x5b, 0x7b, 0xdb, 0xda, 0x5e, 0xab, 0xbd, 0xdb, 0xd8, - 0xd6, 0xba, 0x9d, 0xcd, 0x76, 0x59, 0x41, 0x57, 0x01, 0x75, 0x36, 0x71, 0xa3, 0xdd, 0x9b, 0xd8, - 0x4f, 0x54, 0xbe, 0x82, 0xb4, 0x68, 0xb3, 0xe8, 0x11, 0xa4, 0x78, 0xa3, 0xf5, 0xdd, 0x7b, 0x6b, - 0x1a, 0x03, 0xb1, 0x40, 0xa0, 0x1a, 0x5c, 0x0e, 0x1c, 0x23, 
0x5a, 0xf5, 0x84, 0x3b, 0x17, 0xfc, - 0x23, 0x71, 0x89, 0xf0, 0x43, 0xf5, 0x39, 0xe4, 0x82, 0x59, 0x0b, 0x2d, 0xc1, 0x15, 0xae, 0x88, - 0xb6, 0xdb, 0x6a, 0x6f, 0x9f, 0x32, 0x04, 0x20, 0xd3, 0x6d, 0xe0, 0xfd, 0x06, 0x2e, 0x2b, 0x7c, - 0xbd, 0xb5, 0xd7, 0xe2, 0x31, 0x9b, 0xa8, 0x3e, 0x84, 0x8c, 0xec, 0xef, 0x08, 0x41, 0x6a, 0x60, - 0x19, 0x32, 0x39, 0xd3, 0x58, 0xac, 0x91, 0x0a, 0x59, 0x3f, 0x3a, 0xfc, 0x8e, 0x14, 0x7c, 0x56, - 0x7f, 0x55, 0xa0, 0x34, 0x59, 0x99, 0xd1, 0x6b, 0x28, 0xba, 0xa2, 0xa2, 0x68, 0xb2, 0xb4, 0xcf, - 0x50, 0x8b, 0x9a, 0x73, 0xb8, 0x20, 0x39, 0x24, 0xe5, 0xdf, 0x21, 0x6f, 0x52, 0xa6, 0x45, 0xad, - 0x22, 0xd9, 0x9c, 0xc3, 0x39, 0x93, 0x32, 0x79, 0x7c, 0x03, 0xa0, 0x6f, 0x59, 0x23, 0xff, 0x9c, - 0x07, 0x53, 0xae, 0x39, 0x87, 0xf3, 0xfd, 0x60, 0x4c, 0x40, 0x37, 0xa1, 0x68, 0x58, 0x5e, 0x7f, - 0x44, 0x7c, 0x11, 0x1e, 0x2a, 0x0a, 0xbf, 0x44, 0xee, 0x0a, 0xa1, 0x30, 0x51, 0xab, 0xdf, 0x65, - 0x00, 0xa2, 0xc9, 0x0d, 0xf5, 0xb8, 0x3d, 0x7c, 0xea, 0x3b, 0x70, 0xf4, 0xb1, 0x68, 0xfc, 0xdc, - 0x9e, 0xf5, 0xa9, 0xc6, 0x3e, 0xb9, 0xdc, 0x11, 0x40, 0x2c, 0x87, 0x47, 0xf9, 0x81, 0x56, 0xe1, - 0x72, 0x6c, 0x96, 0xd4, 0x0e, 0x75, 0xf7, 0x50, 0x0b, 0x6b, 0x58, 0x39, 0x1a, 0x16, 0x9b, 0xba, - 0x7b, 0xd8, 0x32, 0x2a, 0xbf, 0x27, 0x7d, 0x9d, 0x04, 0x1c, 0xbd, 0x86, 0xf9, 0x03, 0x8f, 0x0e, - 0x78, 0x22, 0x6b, 0x62, 0xa0, 0x9f, 0xa5, 0xe0, 0x17, 0x03, 0x8a, 0x36, 0xa7, 0xec, 0xc3, 0x55, - 0xcb, 0x31, 0x87, 0x26, 0xd5, 0x47, 0xda, 0x24, 0x77, 0x62, 0x06, 0xee, 0xc5, 0x80, 0x6b, 0x27, - 0x7e, 0x47, 0x0b, 0xf2, 0x07, 0xe6, 0x88, 0x48, 0xda, 0xe4, 0x0c, 0xb4, 0x39, 0x0e, 0x17, 0x54, - 0x37, 0xa0, 0x30, 0x32, 0x29, 0xd1, 0xa8, 0x37, 0xee, 0x13, 0x47, 0x78, 0x34, 0x89, 0x81, 0x6f, - 0xb5, 0xc5, 0x0e, 0xba, 0x09, 0xf3, 0x03, 0x6b, 0xe4, 0x8d, 0x69, 0x20, 0x92, 0x16, 0x22, 0x45, - 0xb9, 0xe9, 0x0b, 0xd5, 0xa1, 0x30, 0xb2, 0x74, 0x43, 0x1b, 0x5b, 0x86, 0x37, 0x0a, 0xfe, 0xaf, - 0x38, 0x6f, 0x08, 0x7e, 0x25, 0x04, 0x31, 0x70, 0x94, 0x5c, 0xa3, 0x2e, 0x94, 0xe4, 0x38, 0xab, - 0x1d, 0x13, 0xc7, 0xe5, 0xdd, 0x37, 0x3b, 0x83, 0x65, 0xf3, 0x92, 0x63, 0x5f, 0x52, 0x54, 0xbe, - 0x51, 0xa0, 0x10, 0x8b, 0x1d, 0xb4, 0x03, 0x69, 0x11, 0x7e, 0xd3, 0x8c, 0x9d, 0xef, 0x8a, 0x3e, - 0x2c, 0xe1, 0xe8, 0x1e, 0x2c, 0x06, 0x65, 0x45, 0x86, 0xf3, 0x44, 0x5d, 0x41, 0xfe, 0x99, 0xbc, - 0x54, 0x16, 0x96, 0x1f, 0x15, 0xc8, 0xf8, 0x96, 0x6e, 0x43, 0xc6, 0x7f, 0xa8, 0x59, 0xc2, 0xcd, - 0xc7, 0xa2, 0x8f, 0x20, 0xd7, 0xf7, 0xf8, 0x68, 0xee, 0x87, 0xfb, 0x5f, 0xe5, 0xc9, 0x0a, 0x74, - 0xcb, 0xa8, 0x7e, 0x0e, 0x0b, 0x67, 0x4e, 0xa3, 0xd1, 0x59, 0x89, 0x8d, 0xce, 0xdc, 0x6c, 0x26, - 0x45, 0x89, 0xa1, 0xf5, 0x4f, 0x18, 0x99, 0x34, 0x3b, 0x3c, 0xab, 0x9f, 0x30, 0x22, 0xcc, 0xae, - 0xdb, 0x70, 0xdd, 0xb4, 0xde, 0xaf, 0x57, 0x5d, 0xfe, 0x57, 0xd0, 0xe1, 0x9b, 0x1d, 0xe5, 0xb3, - 0xfa, 0xd0, 0x64, 0x87, 0x5e, 0xbf, 0x36, 0xb0, 0xc6, 0x6b, 0x52, 0x7e, 0xd5, 0xa4, 0x2e, 0x73, - 0xbc, 0x31, 0xa1, 0xb2, 0xdf, 0xae, 0x45, 0x54, 0xab, 0xf2, 0x67, 0x89, 0x21, 0xa1, 0xab, 0xc3, - 0xe8, 0xf7, 0x8d, 0x7e, 0x46, 0x6c, 0xdf, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe0, 0x94, - 0x45, 0x03, 0x11, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go deleted file mode 100644 index 2ac2d28..0000000 --- a/cmd/bpa-operator/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go +++ /dev/null @@ -1,358 +0,0 @@ -// Code 
generated by protoc-gen-go. DO NOT EDIT. -// source: opencensus/proto/trace/v1/trace_config.proto - -package v1 - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// How spans should be sampled: -// - Always off -// - Always on -// - Always follow the parent Span's decision (off if no parent). -type ConstantSampler_ConstantDecision int32 - -const ( - ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 - ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 - ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 -) - -var ConstantSampler_ConstantDecision_name = map[int32]string{ - 0: "ALWAYS_OFF", - 1: "ALWAYS_ON", - 2: "ALWAYS_PARENT", -} - -var ConstantSampler_ConstantDecision_value = map[string]int32{ - "ALWAYS_OFF": 0, - "ALWAYS_ON": 1, - "ALWAYS_PARENT": 2, -} - -func (x ConstantSampler_ConstantDecision) String() string { - return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) -} - -func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{2, 0} -} - -// Global configuration of the trace service. All fields must be specified, or -// the default (zero) values will be used for each type. -type TraceConfig struct { - // The global default sampler used to make decisions on span sampling. - // - // Types that are valid to be assigned to Sampler: - // *TraceConfig_ProbabilitySampler - // *TraceConfig_ConstantSampler - // *TraceConfig_RateLimitingSampler - Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` - // The global default max number of attributes per span. - MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` - // The global default max number of annotation events per span. - MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"` - // The global default max number of message events per span. - MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"` - // The global default max number of link entries per span. 
- MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TraceConfig) Reset() { *m = TraceConfig{} } -func (m *TraceConfig) String() string { return proto.CompactTextString(m) } -func (*TraceConfig) ProtoMessage() {} -func (*TraceConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{0} -} - -func (m *TraceConfig) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TraceConfig.Unmarshal(m, b) -} -func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) -} -func (m *TraceConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_TraceConfig.Merge(m, src) -} -func (m *TraceConfig) XXX_Size() int { - return xxx_messageInfo_TraceConfig.Size(m) -} -func (m *TraceConfig) XXX_DiscardUnknown() { - xxx_messageInfo_TraceConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_TraceConfig proto.InternalMessageInfo - -type isTraceConfig_Sampler interface { - isTraceConfig_Sampler() -} - -type TraceConfig_ProbabilitySampler struct { - ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"` -} - -type TraceConfig_ConstantSampler struct { - ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` -} - -type TraceConfig_RateLimitingSampler struct { - RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` -} - -func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {} - -func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} - -func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} - -func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { - if m != nil { - return m.Sampler - } - return nil -} - -func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler { - if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok { - return x.ProbabilitySampler - } - return nil -} - -func (m *TraceConfig) GetConstantSampler() *ConstantSampler { - if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { - return x.ConstantSampler - } - return nil -} - -func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { - if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { - return x.RateLimitingSampler - } - return nil -} - -func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { - if m != nil { - return m.MaxNumberOfAttributes - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 { - if m != nil { - return m.MaxNumberOfAnnotations - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 { - if m != nil { - return m.MaxNumberOfMessageEvents - } - return 0 -} - -func (m *TraceConfig) GetMaxNumberOfLinks() int64 { - if m != nil { - return m.MaxNumberOfLinks - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*TraceConfig) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*TraceConfig_ProbabilitySampler)(nil), - (*TraceConfig_ConstantSampler)(nil), - (*TraceConfig_RateLimitingSampler)(nil), - } -} - -// Sampler that tries to uniformly sample traces with a given probability. 
-// The probability of sampling a trace is equal to that of the specified probability. -type ProbabilitySampler struct { - // The desired probability of sampling. Must be within [0.0, 1.0]. - SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} } -func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) } -func (*ProbabilitySampler) ProtoMessage() {} -func (*ProbabilitySampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{1} -} - -func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b) -} -func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic) -} -func (m *ProbabilitySampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProbabilitySampler.Merge(m, src) -} -func (m *ProbabilitySampler) XXX_Size() int { - return xxx_messageInfo_ProbabilitySampler.Size(m) -} -func (m *ProbabilitySampler) XXX_DiscardUnknown() { - xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m) -} - -var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo - -func (m *ProbabilitySampler) GetSamplingProbability() float64 { - if m != nil { - return m.SamplingProbability - } - return 0 -} - -// Sampler that always makes a constant decision on span sampling. -type ConstantSampler struct { - Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } -func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } -func (*ConstantSampler) ProtoMessage() {} -func (*ConstantSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{2} -} - -func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) -} -func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) -} -func (m *ConstantSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConstantSampler.Merge(m, src) -} -func (m *ConstantSampler) XXX_Size() int { - return xxx_messageInfo_ConstantSampler.Size(m) -} -func (m *ConstantSampler) XXX_DiscardUnknown() { - xxx_messageInfo_ConstantSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo - -func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { - if m != nil { - return m.Decision - } - return ConstantSampler_ALWAYS_OFF -} - -// Sampler that tries to sample with a rate per time window. -type RateLimitingSampler struct { - // Rate per second. 
- Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } -func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } -func (*RateLimitingSampler) ProtoMessage() {} -func (*RateLimitingSampler) Descriptor() ([]byte, []int) { - return fileDescriptor_5359209b41ff50c5, []int{3} -} - -func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) -} -func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) -} -func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitingSampler.Merge(m, src) -} -func (m *RateLimitingSampler) XXX_Size() int { - return xxx_messageInfo_RateLimitingSampler.Size(m) -} -func (m *RateLimitingSampler) XXX_DiscardUnknown() { - xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) -} - -var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo - -func (m *RateLimitingSampler) GetQps() int64 { - if m != nil { - return m.Qps - } - return 0 -} - -func init() { - proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) - proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") - proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") - proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") - proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler") -} - -func init() { - proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5) -} - -var fileDescriptor_5359209b41ff50c5 = []byte{ - // 486 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x4e, 0xdb, 0x40, - 0x10, 0x86, 0x31, 0xa1, 0x50, 0x06, 0x01, 0xee, 0x5a, 0x54, 0x46, 0xe2, 0x80, 0x7c, 0x29, 0xaa, - 0x6a, 0xbb, 0xd0, 0x43, 0x55, 0x55, 0xaa, 0x94, 0x00, 0x51, 0x0f, 0x69, 0x88, 0x0c, 0x52, 0xd4, - 0x5e, 0xdc, 0xb5, 0xd9, 0xb8, 0xab, 0xc6, 0xb3, 0xae, 0x77, 0x1d, 0xd1, 0x77, 0xe9, 0x43, 0xf4, - 0x11, 0xab, 0xac, 0x5d, 0xdb, 0x49, 0x00, 0x71, 0xdb, 0xf9, 0xff, 0xf9, 0x7e, 0xaf, 0xbc, 0x33, - 0xf0, 0x46, 0x64, 0x0c, 0x63, 0x86, 0xb2, 0x90, 0x7e, 0x96, 0x0b, 0x25, 0x7c, 0x95, 0xd3, 0x98, - 0xf9, 0xb3, 0xd3, 0xf2, 0x10, 0xc6, 0x02, 0x27, 0x3c, 0xf1, 0xb4, 0x47, 0x0e, 0x9b, 0xee, 0x52, - 0xf1, 0x74, 0x93, 0x37, 0x3b, 0x75, 0xfe, 0x6c, 0xc0, 0xce, 0xcd, 0xbc, 0x38, 0xd7, 0x00, 0xf9, - 0x0e, 0x56, 0x96, 0x8b, 0x88, 0x46, 0x7c, 0xca, 0xd5, 0xef, 0x50, 0xd2, 0x34, 0x9b, 0xb2, 0xdc, - 0x36, 0x8e, 0x8d, 0x93, 0x9d, 0x33, 0xd7, 0x7b, 0x30, 0xc8, 0x1b, 0x35, 0xd4, 0x75, 0x09, 0x7d, - 0x5e, 0x0b, 0x48, 0xb6, 0xa2, 0x92, 0x31, 0x98, 0xb1, 0x40, 0xa9, 0x28, 0xaa, 0x3a, 0x7e, 0x5d, - 0xc7, 0xbf, 0x7e, 0x24, 0xfe, 0xbc, 0x42, 0x9a, 0xec, 0xfd, 0x78, 0x51, 0x22, 0xb7, 0x70, 0x90, - 0x53, 0xc5, 0xc2, 0x29, 0x4f, 0xb9, 0xe2, 0x98, 0xd4, 0xe9, 0x1d, 0x9d, 0xee, 0x3d, 0x92, 0x1e, - 0x50, 0xc5, 0x06, 0x15, 0xd6, 0x7c, 0xc1, 0xca, 0x57, 0x65, 0xf2, 0x1e, 0xec, 0x94, 0xde, 0x85, - 0x58, 0xa4, 0x11, 0xcb, 0x43, 0x31, 0x09, 0xa9, 0x52, 0x39, 0x8f, 0x0a, 0xc5, 0xa4, 0xbd, 0x71, - 
0x6c, 0x9c, 0x74, 0x82, 0x83, 0x94, 0xde, 0x0d, 0xb5, 0x7d, 0x35, 0xe9, 0xd6, 0x26, 0xf9, 0x00, - 0x87, 0x4b, 0x20, 0xa2, 0x50, 0x54, 0x71, 0x81, 0xd2, 0x7e, 0xa6, 0xc9, 0x97, 0x6d, 0xb2, 0x71, - 0xc9, 0x27, 0x38, 0x5a, 0x44, 0x53, 0x26, 0x25, 0x4d, 0x58, 0xc8, 0x66, 0x0c, 0x95, 0xb4, 0x37, - 0x35, 0x6d, 0xb7, 0xe8, 0x2f, 0x65, 0xc3, 0xa5, 0xf6, 0x89, 0x0b, 0xd6, 0x22, 0x3f, 0xe5, 0xf8, - 0x53, 0xda, 0x5b, 0x1a, 0x33, 0x5b, 0xd8, 0x60, 0xae, 0xf7, 0xb6, 0x61, 0xab, 0xfa, 0x75, 0x4e, - 0x1f, 0xc8, 0xea, 0xc3, 0x92, 0xb7, 0x60, 0xe9, 0x06, 0x8e, 0x49, 0xcb, 0xd5, 0x43, 0x62, 0x04, - 0xf7, 0x59, 0xce, 0x5f, 0x03, 0xf6, 0x97, 0x9e, 0x90, 0x8c, 0xe1, 0xf9, 0x2d, 0x8b, 0xb9, 0xe4, - 0x02, 0x35, 0xba, 0x77, 0xf6, 0xf1, 0xe9, 0x03, 0x50, 0xd7, 0x17, 0x55, 0x44, 0x50, 0x87, 0x39, - 0x17, 0x60, 0x2e, 0xbb, 0x64, 0x0f, 0xa0, 0x3b, 0x18, 0x77, 0xbf, 0x5e, 0x87, 0x57, 0xfd, 0xbe, - 0xb9, 0x46, 0x76, 0x61, 0xfb, 0x7f, 0x3d, 0x34, 0x0d, 0xf2, 0x02, 0x76, 0xab, 0x72, 0xd4, 0x0d, - 0x2e, 0x87, 0x37, 0xe6, 0xba, 0xf3, 0x0a, 0xac, 0x7b, 0xc6, 0x82, 0x98, 0xd0, 0xf9, 0x95, 0x49, - 0x7d, 0xe1, 0x4e, 0x30, 0x3f, 0xf6, 0x66, 0x70, 0xc4, 0xc5, 0xc3, 0x37, 0xef, 0x99, 0xad, 0xfd, - 0x1a, 0xcd, 0xad, 0x91, 0xf1, 0xad, 0x97, 0x70, 0xf5, 0xa3, 0x88, 0xbc, 0x58, 0xa4, 0x7e, 0x49, - 0xb9, 0x1c, 0xa5, 0xca, 0x8b, 0x94, 0x61, 0xf9, 0xea, 0x7e, 0x13, 0xe8, 0x96, 0x1b, 0x9e, 0x30, - 0x74, 0x93, 0x66, 0xd1, 0xa3, 0x4d, 0x2d, 0xbf, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x13, 0xe2, - 0xd9, 0x56, 0x0c, 0x04, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/LICENSE b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/LICENSE deleted file mode 100644 index e06d208..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/NOTICE b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/NOTICE deleted file mode 100644 index e520005..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2015 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/register.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/register.go deleted file mode 100644 index a9914fb..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/register.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package monitoring - -const ( - GroupName = "monitoring.coreos.com" -) diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/crd_kinds.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/crd_kinds.go deleted file mode 100644 index 8e3ab87..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/crd_kinds.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "fmt" - "strings" -) - -type CrdKind struct { - Kind string - Plural string - SpecName string -} - -type CrdKinds struct { - KindsString string - Prometheus CrdKind - Alertmanager CrdKind - ServiceMonitor CrdKind - PrometheusRule CrdKind -} - -var DefaultCrdKinds = CrdKinds{ - KindsString: "", - Prometheus: CrdKind{Plural: PrometheusName, Kind: PrometheusesKind, SpecName: "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Prometheus"}, - ServiceMonitor: CrdKind{Plural: ServiceMonitorName, Kind: ServiceMonitorsKind, SpecName: "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitor"}, - Alertmanager: CrdKind{Plural: AlertmanagerName, Kind: AlertmanagersKind, SpecName: "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Alertmanager"}, - PrometheusRule: CrdKind{Plural: PrometheusRuleName, Kind: PrometheusRuleKind, SpecName: "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusRule"}, -} - -// Implement the flag.Value interface -func (crdkinds *CrdKinds) String() string { - return crdkinds.KindsString -} - -// Set Implement the flag.Set interface -func (crdkinds *CrdKinds) Set(value string) error { - *crdkinds = DefaultCrdKinds - if value == "" { - value = fmt.Sprintf("%s=%s:%s,%s=%s:%s,%s=%s:%s,%s=%s:%s", - PrometheusKindKey, PrometheusesKind, PrometheusName, - AlertManagerKindKey, AlertmanagersKind, AlertmanagerName, - ServiceMonitorKindKey, ServiceMonitorsKind, ServiceMonitorName, - PrometheusRuleKindKey, PrometheusRuleKind, PrometheusRuleName, - ) - } - splited := strings.Split(value, ",") - for _, pair := range splited { - sp := strings.Split(pair, "=") - kind := strings.Split(sp[1], ":") - crdKind := CrdKind{Plural: kind[1], Kind: kind[0]} - switch kindKey := sp[0]; kindKey { - case PrometheusKindKey: - (*crdkinds).Prometheus = crdKind - case ServiceMonitorKindKey: - (*crdkinds).ServiceMonitor = crdKind - case AlertManagerKindKey: - (*crdkinds).Alertmanager = crdKind - case PrometheusRuleKindKey: - 
(*crdkinds).PrometheusRule = crdKind - default: - fmt.Printf("Warning: unknown kind: %s... ignoring", kindKey) - } - - } - (*crdkinds).KindsString = value - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/doc.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/doc.go deleted file mode 100644 index 64c4725..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2017 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +k8s:deepcopy-gen=package -// +groupName=monitoring.coreos.com - -package v1 diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/openapi_generated.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/openapi_generated.go deleted file mode 100644 index 2010012..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/openapi_generated.go +++ /dev/null @@ -1,13966 +0,0 @@ -// +build !ignore_autogenerated - -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by openapi-gen. DO NOT EDIT. - -// This file was autogenerated by openapi-gen. Do not edit it manually! 
- -package v1 - -import ( - spec "github.com/go-openapi/spec" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - common "k8s.io/kube-openapi/pkg/common" -) - -func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { - return map[string]common.OpenAPIDefinition{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.APIServerConfig": schema_pkg_apis_monitoring_v1_APIServerConfig(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertingSpec": schema_pkg_apis_monitoring_v1_AlertingSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Alertmanager": schema_pkg_apis_monitoring_v1_Alertmanager(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerEndpoints": schema_pkg_apis_monitoring_v1_AlertmanagerEndpoints(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerList": schema_pkg_apis_monitoring_v1_AlertmanagerList(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerSpec": schema_pkg_apis_monitoring_v1_AlertmanagerSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerStatus": schema_pkg_apis_monitoring_v1_AlertmanagerStatus(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth": schema_pkg_apis_monitoring_v1_BasicAuth(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Endpoint": schema_pkg_apis_monitoring_v1_Endpoint(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.NamespaceSelector": schema_pkg_apis_monitoring_v1_NamespaceSelector(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Prometheus": schema_pkg_apis_monitoring_v1_Prometheus(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusList": schema_pkg_apis_monitoring_v1_PrometheusList(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusRule": schema_pkg_apis_monitoring_v1_PrometheusRule(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusRuleList": schema_pkg_apis_monitoring_v1_PrometheusRuleList(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusRuleSpec": schema_pkg_apis_monitoring_v1_PrometheusRuleSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusSpec": schema_pkg_apis_monitoring_v1_PrometheusSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusStatus": schema_pkg_apis_monitoring_v1_PrometheusStatus(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.QuerySpec": schema_pkg_apis_monitoring_v1_QuerySpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.QueueConfig": schema_pkg_apis_monitoring_v1_QueueConfig(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig": schema_pkg_apis_monitoring_v1_RelabelConfig(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RemoteReadSpec": schema_pkg_apis_monitoring_v1_RemoteReadSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RemoteWriteSpec": schema_pkg_apis_monitoring_v1_RemoteWriteSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Rule": schema_pkg_apis_monitoring_v1_Rule(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RuleGroup": schema_pkg_apis_monitoring_v1_RuleGroup(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Rules": schema_pkg_apis_monitoring_v1_Rules(ref), - 
"github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RulesAlert": schema_pkg_apis_monitoring_v1_RulesAlert(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitor": schema_pkg_apis_monitoring_v1_ServiceMonitor(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitorList": schema_pkg_apis_monitoring_v1_ServiceMonitorList(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitorSpec": schema_pkg_apis_monitoring_v1_ServiceMonitorSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec": schema_pkg_apis_monitoring_v1_StorageSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig": schema_pkg_apis_monitoring_v1_TLSConfig(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosGCSSpec": schema_pkg_apis_monitoring_v1_ThanosGCSSpec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosS3Spec": schema_pkg_apis_monitoring_v1_ThanosS3Spec(ref), - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosSpec": schema_pkg_apis_monitoring_v1_ThanosSpec(ref), - "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource": schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref), - "k8s.io/api/core/v1.Affinity": schema_k8sio_api_core_v1_Affinity(ref), - "k8s.io/api/core/v1.AttachedVolume": schema_k8sio_api_core_v1_AttachedVolume(ref), - "k8s.io/api/core/v1.AvoidPods": schema_k8sio_api_core_v1_AvoidPods(ref), - "k8s.io/api/core/v1.AzureDiskVolumeSource": schema_k8sio_api_core_v1_AzureDiskVolumeSource(ref), - "k8s.io/api/core/v1.AzureFilePersistentVolumeSource": schema_k8sio_api_core_v1_AzureFilePersistentVolumeSource(ref), - "k8s.io/api/core/v1.AzureFileVolumeSource": schema_k8sio_api_core_v1_AzureFileVolumeSource(ref), - "k8s.io/api/core/v1.Binding": schema_k8sio_api_core_v1_Binding(ref), - "k8s.io/api/core/v1.CSIPersistentVolumeSource": schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref), - "k8s.io/api/core/v1.Capabilities": schema_k8sio_api_core_v1_Capabilities(ref), - "k8s.io/api/core/v1.CephFSPersistentVolumeSource": schema_k8sio_api_core_v1_CephFSPersistentVolumeSource(ref), - "k8s.io/api/core/v1.CephFSVolumeSource": schema_k8sio_api_core_v1_CephFSVolumeSource(ref), - "k8s.io/api/core/v1.CinderPersistentVolumeSource": schema_k8sio_api_core_v1_CinderPersistentVolumeSource(ref), - "k8s.io/api/core/v1.CinderVolumeSource": schema_k8sio_api_core_v1_CinderVolumeSource(ref), - "k8s.io/api/core/v1.ClientIPConfig": schema_k8sio_api_core_v1_ClientIPConfig(ref), - "k8s.io/api/core/v1.ComponentCondition": schema_k8sio_api_core_v1_ComponentCondition(ref), - "k8s.io/api/core/v1.ComponentStatus": schema_k8sio_api_core_v1_ComponentStatus(ref), - "k8s.io/api/core/v1.ComponentStatusList": schema_k8sio_api_core_v1_ComponentStatusList(ref), - "k8s.io/api/core/v1.ConfigMap": schema_k8sio_api_core_v1_ConfigMap(ref), - "k8s.io/api/core/v1.ConfigMapEnvSource": schema_k8sio_api_core_v1_ConfigMapEnvSource(ref), - "k8s.io/api/core/v1.ConfigMapKeySelector": schema_k8sio_api_core_v1_ConfigMapKeySelector(ref), - "k8s.io/api/core/v1.ConfigMapList": schema_k8sio_api_core_v1_ConfigMapList(ref), - "k8s.io/api/core/v1.ConfigMapNodeConfigSource": schema_k8sio_api_core_v1_ConfigMapNodeConfigSource(ref), - "k8s.io/api/core/v1.ConfigMapProjection": schema_k8sio_api_core_v1_ConfigMapProjection(ref), - "k8s.io/api/core/v1.ConfigMapVolumeSource": schema_k8sio_api_core_v1_ConfigMapVolumeSource(ref), - "k8s.io/api/core/v1.Container": 
schema_k8sio_api_core_v1_Container(ref), - "k8s.io/api/core/v1.ContainerImage": schema_k8sio_api_core_v1_ContainerImage(ref), - "k8s.io/api/core/v1.ContainerPort": schema_k8sio_api_core_v1_ContainerPort(ref), - "k8s.io/api/core/v1.ContainerState": schema_k8sio_api_core_v1_ContainerState(ref), - "k8s.io/api/core/v1.ContainerStateRunning": schema_k8sio_api_core_v1_ContainerStateRunning(ref), - "k8s.io/api/core/v1.ContainerStateTerminated": schema_k8sio_api_core_v1_ContainerStateTerminated(ref), - "k8s.io/api/core/v1.ContainerStateWaiting": schema_k8sio_api_core_v1_ContainerStateWaiting(ref), - "k8s.io/api/core/v1.ContainerStatus": schema_k8sio_api_core_v1_ContainerStatus(ref), - "k8s.io/api/core/v1.DaemonEndpoint": schema_k8sio_api_core_v1_DaemonEndpoint(ref), - "k8s.io/api/core/v1.DownwardAPIProjection": schema_k8sio_api_core_v1_DownwardAPIProjection(ref), - "k8s.io/api/core/v1.DownwardAPIVolumeFile": schema_k8sio_api_core_v1_DownwardAPIVolumeFile(ref), - "k8s.io/api/core/v1.DownwardAPIVolumeSource": schema_k8sio_api_core_v1_DownwardAPIVolumeSource(ref), - "k8s.io/api/core/v1.EmptyDirVolumeSource": schema_k8sio_api_core_v1_EmptyDirVolumeSource(ref), - "k8s.io/api/core/v1.EndpointAddress": schema_k8sio_api_core_v1_EndpointAddress(ref), - "k8s.io/api/core/v1.EndpointPort": schema_k8sio_api_core_v1_EndpointPort(ref), - "k8s.io/api/core/v1.EndpointSubset": schema_k8sio_api_core_v1_EndpointSubset(ref), - "k8s.io/api/core/v1.Endpoints": schema_k8sio_api_core_v1_Endpoints(ref), - "k8s.io/api/core/v1.EndpointsList": schema_k8sio_api_core_v1_EndpointsList(ref), - "k8s.io/api/core/v1.EnvFromSource": schema_k8sio_api_core_v1_EnvFromSource(ref), - "k8s.io/api/core/v1.EnvVar": schema_k8sio_api_core_v1_EnvVar(ref), - "k8s.io/api/core/v1.EnvVarSource": schema_k8sio_api_core_v1_EnvVarSource(ref), - "k8s.io/api/core/v1.Event": schema_k8sio_api_core_v1_Event(ref), - "k8s.io/api/core/v1.EventList": schema_k8sio_api_core_v1_EventList(ref), - "k8s.io/api/core/v1.EventSeries": schema_k8sio_api_core_v1_EventSeries(ref), - "k8s.io/api/core/v1.EventSource": schema_k8sio_api_core_v1_EventSource(ref), - "k8s.io/api/core/v1.ExecAction": schema_k8sio_api_core_v1_ExecAction(ref), - "k8s.io/api/core/v1.FCVolumeSource": schema_k8sio_api_core_v1_FCVolumeSource(ref), - "k8s.io/api/core/v1.FlexPersistentVolumeSource": schema_k8sio_api_core_v1_FlexPersistentVolumeSource(ref), - "k8s.io/api/core/v1.FlexVolumeSource": schema_k8sio_api_core_v1_FlexVolumeSource(ref), - "k8s.io/api/core/v1.FlockerVolumeSource": schema_k8sio_api_core_v1_FlockerVolumeSource(ref), - "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource": schema_k8sio_api_core_v1_GCEPersistentDiskVolumeSource(ref), - "k8s.io/api/core/v1.GitRepoVolumeSource": schema_k8sio_api_core_v1_GitRepoVolumeSource(ref), - "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource": schema_k8sio_api_core_v1_GlusterfsPersistentVolumeSource(ref), - "k8s.io/api/core/v1.GlusterfsVolumeSource": schema_k8sio_api_core_v1_GlusterfsVolumeSource(ref), - "k8s.io/api/core/v1.HTTPGetAction": schema_k8sio_api_core_v1_HTTPGetAction(ref), - "k8s.io/api/core/v1.HTTPHeader": schema_k8sio_api_core_v1_HTTPHeader(ref), - "k8s.io/api/core/v1.Handler": schema_k8sio_api_core_v1_Handler(ref), - "k8s.io/api/core/v1.HostAlias": schema_k8sio_api_core_v1_HostAlias(ref), - "k8s.io/api/core/v1.HostPathVolumeSource": schema_k8sio_api_core_v1_HostPathVolumeSource(ref), - "k8s.io/api/core/v1.ISCSIPersistentVolumeSource": schema_k8sio_api_core_v1_ISCSIPersistentVolumeSource(ref), - "k8s.io/api/core/v1.ISCSIVolumeSource": 
schema_k8sio_api_core_v1_ISCSIVolumeSource(ref), - "k8s.io/api/core/v1.KeyToPath": schema_k8sio_api_core_v1_KeyToPath(ref), - "k8s.io/api/core/v1.Lifecycle": schema_k8sio_api_core_v1_Lifecycle(ref), - "k8s.io/api/core/v1.LimitRange": schema_k8sio_api_core_v1_LimitRange(ref), - "k8s.io/api/core/v1.LimitRangeItem": schema_k8sio_api_core_v1_LimitRangeItem(ref), - "k8s.io/api/core/v1.LimitRangeList": schema_k8sio_api_core_v1_LimitRangeList(ref), - "k8s.io/api/core/v1.LimitRangeSpec": schema_k8sio_api_core_v1_LimitRangeSpec(ref), - "k8s.io/api/core/v1.List": schema_k8sio_api_core_v1_List(ref), - "k8s.io/api/core/v1.LoadBalancerIngress": schema_k8sio_api_core_v1_LoadBalancerIngress(ref), - "k8s.io/api/core/v1.LoadBalancerStatus": schema_k8sio_api_core_v1_LoadBalancerStatus(ref), - "k8s.io/api/core/v1.LocalObjectReference": schema_k8sio_api_core_v1_LocalObjectReference(ref), - "k8s.io/api/core/v1.LocalVolumeSource": schema_k8sio_api_core_v1_LocalVolumeSource(ref), - "k8s.io/api/core/v1.NFSVolumeSource": schema_k8sio_api_core_v1_NFSVolumeSource(ref), - "k8s.io/api/core/v1.Namespace": schema_k8sio_api_core_v1_Namespace(ref), - "k8s.io/api/core/v1.NamespaceList": schema_k8sio_api_core_v1_NamespaceList(ref), - "k8s.io/api/core/v1.NamespaceSpec": schema_k8sio_api_core_v1_NamespaceSpec(ref), - "k8s.io/api/core/v1.NamespaceStatus": schema_k8sio_api_core_v1_NamespaceStatus(ref), - "k8s.io/api/core/v1.Node": schema_k8sio_api_core_v1_Node(ref), - "k8s.io/api/core/v1.NodeAddress": schema_k8sio_api_core_v1_NodeAddress(ref), - "k8s.io/api/core/v1.NodeAffinity": schema_k8sio_api_core_v1_NodeAffinity(ref), - "k8s.io/api/core/v1.NodeCondition": schema_k8sio_api_core_v1_NodeCondition(ref), - "k8s.io/api/core/v1.NodeConfigSource": schema_k8sio_api_core_v1_NodeConfigSource(ref), - "k8s.io/api/core/v1.NodeConfigStatus": schema_k8sio_api_core_v1_NodeConfigStatus(ref), - "k8s.io/api/core/v1.NodeDaemonEndpoints": schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref), - "k8s.io/api/core/v1.NodeList": schema_k8sio_api_core_v1_NodeList(ref), - "k8s.io/api/core/v1.NodeProxyOptions": schema_k8sio_api_core_v1_NodeProxyOptions(ref), - "k8s.io/api/core/v1.NodeResources": schema_k8sio_api_core_v1_NodeResources(ref), - "k8s.io/api/core/v1.NodeSelector": schema_k8sio_api_core_v1_NodeSelector(ref), - "k8s.io/api/core/v1.NodeSelectorRequirement": schema_k8sio_api_core_v1_NodeSelectorRequirement(ref), - "k8s.io/api/core/v1.NodeSelectorTerm": schema_k8sio_api_core_v1_NodeSelectorTerm(ref), - "k8s.io/api/core/v1.NodeSpec": schema_k8sio_api_core_v1_NodeSpec(ref), - "k8s.io/api/core/v1.NodeStatus": schema_k8sio_api_core_v1_NodeStatus(ref), - "k8s.io/api/core/v1.NodeSystemInfo": schema_k8sio_api_core_v1_NodeSystemInfo(ref), - "k8s.io/api/core/v1.ObjectFieldSelector": schema_k8sio_api_core_v1_ObjectFieldSelector(ref), - "k8s.io/api/core/v1.ObjectReference": schema_k8sio_api_core_v1_ObjectReference(ref), - "k8s.io/api/core/v1.PersistentVolume": schema_k8sio_api_core_v1_PersistentVolume(ref), - "k8s.io/api/core/v1.PersistentVolumeClaim": schema_k8sio_api_core_v1_PersistentVolumeClaim(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimCondition": schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimList": schema_k8sio_api_core_v1_PersistentVolumeClaimList(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimSpec": schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref), - "k8s.io/api/core/v1.PersistentVolumeClaimStatus": schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref), - 
"k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeClaimVolumeSource(ref), - "k8s.io/api/core/v1.PersistentVolumeList": schema_k8sio_api_core_v1_PersistentVolumeList(ref), - "k8s.io/api/core/v1.PersistentVolumeSource": schema_k8sio_api_core_v1_PersistentVolumeSource(ref), - "k8s.io/api/core/v1.PersistentVolumeSpec": schema_k8sio_api_core_v1_PersistentVolumeSpec(ref), - "k8s.io/api/core/v1.PersistentVolumeStatus": schema_k8sio_api_core_v1_PersistentVolumeStatus(ref), - "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource": schema_k8sio_api_core_v1_PhotonPersistentDiskVolumeSource(ref), - "k8s.io/api/core/v1.Pod": schema_k8sio_api_core_v1_Pod(ref), - "k8s.io/api/core/v1.PodAffinity": schema_k8sio_api_core_v1_PodAffinity(ref), - "k8s.io/api/core/v1.PodAffinityTerm": schema_k8sio_api_core_v1_PodAffinityTerm(ref), - "k8s.io/api/core/v1.PodAntiAffinity": schema_k8sio_api_core_v1_PodAntiAffinity(ref), - "k8s.io/api/core/v1.PodAttachOptions": schema_k8sio_api_core_v1_PodAttachOptions(ref), - "k8s.io/api/core/v1.PodCondition": schema_k8sio_api_core_v1_PodCondition(ref), - "k8s.io/api/core/v1.PodDNSConfig": schema_k8sio_api_core_v1_PodDNSConfig(ref), - "k8s.io/api/core/v1.PodDNSConfigOption": schema_k8sio_api_core_v1_PodDNSConfigOption(ref), - "k8s.io/api/core/v1.PodExecOptions": schema_k8sio_api_core_v1_PodExecOptions(ref), - "k8s.io/api/core/v1.PodList": schema_k8sio_api_core_v1_PodList(ref), - "k8s.io/api/core/v1.PodLogOptions": schema_k8sio_api_core_v1_PodLogOptions(ref), - "k8s.io/api/core/v1.PodPortForwardOptions": schema_k8sio_api_core_v1_PodPortForwardOptions(ref), - "k8s.io/api/core/v1.PodProxyOptions": schema_k8sio_api_core_v1_PodProxyOptions(ref), - "k8s.io/api/core/v1.PodReadinessGate": schema_k8sio_api_core_v1_PodReadinessGate(ref), - "k8s.io/api/core/v1.PodSecurityContext": schema_k8sio_api_core_v1_PodSecurityContext(ref), - "k8s.io/api/core/v1.PodSignature": schema_k8sio_api_core_v1_PodSignature(ref), - "k8s.io/api/core/v1.PodSpec": schema_k8sio_api_core_v1_PodSpec(ref), - "k8s.io/api/core/v1.PodStatus": schema_k8sio_api_core_v1_PodStatus(ref), - "k8s.io/api/core/v1.PodStatusResult": schema_k8sio_api_core_v1_PodStatusResult(ref), - "k8s.io/api/core/v1.PodTemplate": schema_k8sio_api_core_v1_PodTemplate(ref), - "k8s.io/api/core/v1.PodTemplateList": schema_k8sio_api_core_v1_PodTemplateList(ref), - "k8s.io/api/core/v1.PodTemplateSpec": schema_k8sio_api_core_v1_PodTemplateSpec(ref), - "k8s.io/api/core/v1.PortworxVolumeSource": schema_k8sio_api_core_v1_PortworxVolumeSource(ref), - "k8s.io/api/core/v1.PreferAvoidPodsEntry": schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref), - "k8s.io/api/core/v1.PreferredSchedulingTerm": schema_k8sio_api_core_v1_PreferredSchedulingTerm(ref), - "k8s.io/api/core/v1.Probe": schema_k8sio_api_core_v1_Probe(ref), - "k8s.io/api/core/v1.ProjectedVolumeSource": schema_k8sio_api_core_v1_ProjectedVolumeSource(ref), - "k8s.io/api/core/v1.QuobyteVolumeSource": schema_k8sio_api_core_v1_QuobyteVolumeSource(ref), - "k8s.io/api/core/v1.RBDPersistentVolumeSource": schema_k8sio_api_core_v1_RBDPersistentVolumeSource(ref), - "k8s.io/api/core/v1.RBDVolumeSource": schema_k8sio_api_core_v1_RBDVolumeSource(ref), - "k8s.io/api/core/v1.RangeAllocation": schema_k8sio_api_core_v1_RangeAllocation(ref), - "k8s.io/api/core/v1.ReplicationController": schema_k8sio_api_core_v1_ReplicationController(ref), - "k8s.io/api/core/v1.ReplicationControllerCondition": schema_k8sio_api_core_v1_ReplicationControllerCondition(ref), - 
"k8s.io/api/core/v1.ReplicationControllerList": schema_k8sio_api_core_v1_ReplicationControllerList(ref), - "k8s.io/api/core/v1.ReplicationControllerSpec": schema_k8sio_api_core_v1_ReplicationControllerSpec(ref), - "k8s.io/api/core/v1.ReplicationControllerStatus": schema_k8sio_api_core_v1_ReplicationControllerStatus(ref), - "k8s.io/api/core/v1.ResourceFieldSelector": schema_k8sio_api_core_v1_ResourceFieldSelector(ref), - "k8s.io/api/core/v1.ResourceQuota": schema_k8sio_api_core_v1_ResourceQuota(ref), - "k8s.io/api/core/v1.ResourceQuotaList": schema_k8sio_api_core_v1_ResourceQuotaList(ref), - "k8s.io/api/core/v1.ResourceQuotaSpec": schema_k8sio_api_core_v1_ResourceQuotaSpec(ref), - "k8s.io/api/core/v1.ResourceQuotaStatus": schema_k8sio_api_core_v1_ResourceQuotaStatus(ref), - "k8s.io/api/core/v1.ResourceRequirements": schema_k8sio_api_core_v1_ResourceRequirements(ref), - "k8s.io/api/core/v1.SELinuxOptions": schema_k8sio_api_core_v1_SELinuxOptions(ref), - "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource": schema_k8sio_api_core_v1_ScaleIOPersistentVolumeSource(ref), - "k8s.io/api/core/v1.ScaleIOVolumeSource": schema_k8sio_api_core_v1_ScaleIOVolumeSource(ref), - "k8s.io/api/core/v1.ScopeSelector": schema_k8sio_api_core_v1_ScopeSelector(ref), - "k8s.io/api/core/v1.ScopedResourceSelectorRequirement": schema_k8sio_api_core_v1_ScopedResourceSelectorRequirement(ref), - "k8s.io/api/core/v1.Secret": schema_k8sio_api_core_v1_Secret(ref), - "k8s.io/api/core/v1.SecretEnvSource": schema_k8sio_api_core_v1_SecretEnvSource(ref), - "k8s.io/api/core/v1.SecretKeySelector": schema_k8sio_api_core_v1_SecretKeySelector(ref), - "k8s.io/api/core/v1.SecretList": schema_k8sio_api_core_v1_SecretList(ref), - "k8s.io/api/core/v1.SecretProjection": schema_k8sio_api_core_v1_SecretProjection(ref), - "k8s.io/api/core/v1.SecretReference": schema_k8sio_api_core_v1_SecretReference(ref), - "k8s.io/api/core/v1.SecretVolumeSource": schema_k8sio_api_core_v1_SecretVolumeSource(ref), - "k8s.io/api/core/v1.SecurityContext": schema_k8sio_api_core_v1_SecurityContext(ref), - "k8s.io/api/core/v1.SerializedReference": schema_k8sio_api_core_v1_SerializedReference(ref), - "k8s.io/api/core/v1.Service": schema_k8sio_api_core_v1_Service(ref), - "k8s.io/api/core/v1.ServiceAccount": schema_k8sio_api_core_v1_ServiceAccount(ref), - "k8s.io/api/core/v1.ServiceAccountList": schema_k8sio_api_core_v1_ServiceAccountList(ref), - "k8s.io/api/core/v1.ServiceAccountTokenProjection": schema_k8sio_api_core_v1_ServiceAccountTokenProjection(ref), - "k8s.io/api/core/v1.ServiceList": schema_k8sio_api_core_v1_ServiceList(ref), - "k8s.io/api/core/v1.ServicePort": schema_k8sio_api_core_v1_ServicePort(ref), - "k8s.io/api/core/v1.ServiceProxyOptions": schema_k8sio_api_core_v1_ServiceProxyOptions(ref), - "k8s.io/api/core/v1.ServiceSpec": schema_k8sio_api_core_v1_ServiceSpec(ref), - "k8s.io/api/core/v1.ServiceStatus": schema_k8sio_api_core_v1_ServiceStatus(ref), - "k8s.io/api/core/v1.SessionAffinityConfig": schema_k8sio_api_core_v1_SessionAffinityConfig(ref), - "k8s.io/api/core/v1.StorageOSPersistentVolumeSource": schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref), - "k8s.io/api/core/v1.StorageOSVolumeSource": schema_k8sio_api_core_v1_StorageOSVolumeSource(ref), - "k8s.io/api/core/v1.Sysctl": schema_k8sio_api_core_v1_Sysctl(ref), - "k8s.io/api/core/v1.TCPSocketAction": schema_k8sio_api_core_v1_TCPSocketAction(ref), - "k8s.io/api/core/v1.Taint": schema_k8sio_api_core_v1_Taint(ref), - "k8s.io/api/core/v1.Toleration": schema_k8sio_api_core_v1_Toleration(ref), - 
"k8s.io/api/core/v1.TopologySelectorLabelRequirement": schema_k8sio_api_core_v1_TopologySelectorLabelRequirement(ref), - "k8s.io/api/core/v1.TopologySelectorTerm": schema_k8sio_api_core_v1_TopologySelectorTerm(ref), - "k8s.io/api/core/v1.TypedLocalObjectReference": schema_k8sio_api_core_v1_TypedLocalObjectReference(ref), - "k8s.io/api/core/v1.Volume": schema_k8sio_api_core_v1_Volume(ref), - "k8s.io/api/core/v1.VolumeDevice": schema_k8sio_api_core_v1_VolumeDevice(ref), - "k8s.io/api/core/v1.VolumeMount": schema_k8sio_api_core_v1_VolumeMount(ref), - "k8s.io/api/core/v1.VolumeNodeAffinity": schema_k8sio_api_core_v1_VolumeNodeAffinity(ref), - "k8s.io/api/core/v1.VolumeProjection": schema_k8sio_api_core_v1_VolumeProjection(ref), - "k8s.io/api/core/v1.VolumeSource": schema_k8sio_api_core_v1_VolumeSource(ref), - "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource": schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref), - "k8s.io/api/core/v1.WeightedPodAffinityTerm": schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList": schema_pkg_apis_meta_v1_APIResourceList(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions": schema_pkg_apis_meta_v1_APIVersions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions": schema_pkg_apis_meta_v1_CreateOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions": schema_pkg_apis_meta_v1_DeleteOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration": schema_pkg_apis_meta_v1_Duration(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ExportOptions": schema_pkg_apis_meta_v1_ExportOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions": schema_pkg_apis_meta_v1_GetOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind": schema_pkg_apis_meta_v1_GroupKind(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource": schema_pkg_apis_meta_v1_GroupResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion": schema_pkg_apis_meta_v1_GroupVersion(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery": schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind": schema_pkg_apis_meta_v1_GroupVersionKind(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource": schema_pkg_apis_meta_v1_GroupVersionResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Initializer": schema_pkg_apis_meta_v1_Initializer(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Initializers": schema_pkg_apis_meta_v1_Initializers(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent": schema_pkg_apis_meta_v1_InternalEvent(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector": schema_pkg_apis_meta_v1_LabelSelector(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement": schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.List": schema_pkg_apis_meta_v1_List(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta": schema_pkg_apis_meta_v1_ListMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions": schema_pkg_apis_meta_v1_ListOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime": schema_pkg_apis_meta_v1_MicroTime(ref), - 
"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta": schema_pkg_apis_meta_v1_ObjectMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference": schema_pkg_apis_meta_v1_OwnerReference(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Patch": schema_pkg_apis_meta_v1_Patch(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions": schema_pkg_apis_meta_v1_Preconditions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths": schema_pkg_apis_meta_v1_RootPaths(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Status": schema_pkg_apis_meta_v1_Status(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause": schema_pkg_apis_meta_v1_StatusCause(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails": schema_pkg_apis_meta_v1_StatusDetails(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Time": schema_pkg_apis_meta_v1_Time(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp": schema_pkg_apis_meta_v1_Timestamp(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta": schema_pkg_apis_meta_v1_TypeMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions": schema_pkg_apis_meta_v1_UpdateOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent": schema_pkg_apis_meta_v1_WatchEvent(ref), - } -} - -func schema_pkg_apis_monitoring_v1_APIServerConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "APIServerConfig defines a host and auth methods to access apiserver. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config", - Properties: map[string]spec.Schema{ - "host": { - SchemaProps: spec.SchemaProps{ - Description: "Host of apiserver. 
A valid string consisting of a hostname or IP followed by an optional port number", - Type: []string{"string"}, - Format: "", - }, - }, - "basicAuth": { - SchemaProps: spec.SchemaProps{ - Description: "BasicAuth allow an endpoint to authenticate over basic authentication", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth"), - }, - }, - "bearerToken": { - SchemaProps: spec.SchemaProps{ - Description: "Bearer token for accessing apiserver.", - Type: []string{"string"}, - Format: "", - }, - }, - "bearerTokenFile": { - SchemaProps: spec.SchemaProps{ - Description: "File to read bearer token for accessing apiserver.", - Type: []string{"string"}, - Format: "", - }, - }, - "tlsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "TLS Config to use for accessing apiserver.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig"), - }, - }, - }, - Required: []string{"host"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig"}, - } -} - -func schema_pkg_apis_monitoring_v1_AlertingSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AlertingSpec defines parameters for alerting configuration of Prometheus servers.", - Properties: map[string]spec.Schema{ - "alertmanagers": { - SchemaProps: spec.SchemaProps{ - Description: "AlertmanagerEndpoints Prometheus should fire alerts against.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerEndpoints"), - }, - }, - }, - }, - }, - }, - Required: []string{"alertmanagers"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerEndpoints"}, - } -} - -func schema_pkg_apis_monitoring_v1_Alertmanager(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Alertmanager describes an Alertmanager cluster.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Specification of the desired behavior of the Alertmanager cluster. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Most recent observed status of the Alertmanager cluster. Read-only. Not included when requesting from the apiserver, only from the Prometheus Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerStatus"), - }, - }, - }, - Required: []string{"spec"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerSpec", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertmanagerStatus"}, - } -} - -func schema_pkg_apis_monitoring_v1_AlertmanagerEndpoints(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AlertmanagerEndpoints defines a selection of a single Endpoints object containing alertmanager IPs to fire alerts against.", - Properties: map[string]spec.Schema{ - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "Namespace of Endpoints object.", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of Endpoints object in Namespace.", - Type: []string{"string"}, - Format: "", - }, - }, - "port": { - SchemaProps: spec.SchemaProps{ - Description: "Port the Alertmanager API is exposed on.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "scheme": { - SchemaProps: spec.SchemaProps{ - Description: "Scheme to use when firing alerts.", - Type: []string{"string"}, - Format: "", - }, - }, - "pathPrefix": { - SchemaProps: spec.SchemaProps{ - Description: "Prefix for the HTTP path alerts are pushed to.", - Type: []string{"string"}, - Format: "", - }, - }, - "tlsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "TLS Config to use for alertmanager connection.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig"), - }, - }, - "bearerTokenFile": { - SchemaProps: spec.SchemaProps{ - Description: "BearerTokenFile to read from filesystem to use when authenticating to Alertmanager.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"namespace", "name", "port"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_monitoring_v1_AlertmanagerList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AlertmanagerList is a list of Alertmanagers.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of Alertmanagers", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Alertmanager"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Alertmanager", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_monitoring_v1_AlertmanagerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status", - Properties: map[string]spec.Schema{ - "podMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata Metadata Labels and Annotations gets propagated to the prometheus pods.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Image if specified has precedence over baseImage, tag and sha combinations. Specifying the version is still necessary to ensure the Prometheus Operator knows what version of Alertmanager is being configured.", - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Description: "Version the cluster should be on.", - Type: []string{"string"}, - Format: "", - }, - }, - "tag": { - SchemaProps: spec.SchemaProps{ - Description: "Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. Version is ignored if Tag is set.", - Type: []string{"string"}, - Format: "", - }, - }, - "sha": { - SchemaProps: spec.SchemaProps{ - Description: "SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. Similar to a tag, but the SHA explicitly deploys an immutable container image. 
Version and Tag are ignored if SHA is set.", - Type: []string{"string"}, - Format: "", - }, - }, - "baseImage": { - SchemaProps: spec.SchemaProps{ - Description: "Base image that is used to deploy pods, without tag.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullSecrets": { - SchemaProps: spec.SchemaProps{ - Description: "An optional list of references to secrets in the same namespace to use for pulling prometheus and alertmanager images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - }, - }, - "secrets": { - SchemaProps: spec.SchemaProps{ - Description: "Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "configMaps": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "logLevel": { - SchemaProps: spec.SchemaProps{ - Description: "Log level for Alertmanager to be configured with.", - Type: []string{"string"}, - Format: "", - }, - }, - "replicas": { - SchemaProps: spec.SchemaProps{ - Description: "Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the running cluster equal to the expected size.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "retention": { - SchemaProps: spec.SchemaProps{ - Description: "Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours).", - Type: []string{"string"}, - Format: "", - }, - }, - "storage": { - SchemaProps: spec.SchemaProps{ - Description: "Storage is the definition of how storage will be used by the Alertmanager instances.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec"), - }, - }, - "externalUrl": { - SchemaProps: spec.SchemaProps{ - Description: "The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name.", - Type: []string{"string"}, - Format: "", - }, - }, - "routePrefix": { - SchemaProps: spec.SchemaProps{ - Description: "The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. 
For example for use with `kubectl proxy`.", - Type: []string{"string"}, - Format: "", - }, - }, - "paused": { - SchemaProps: spec.SchemaProps{ - Description: "If set to true all actions on the underlaying managed objects are not goint to be performed, except for delete actions.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "Define which Nodes the Pods are scheduled on.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Define resources requests and limits for single Pods.", - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints.", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "tolerations": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Toleration"), - }, - }, - }, - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. This defaults to non root user with uid 1000 and gid 2000.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run the Prometheus Pods.", - Type: []string{"string"}, - Format: "", - }, - }, - "listenLocal": { - SchemaProps: spec.SchemaProps{ - Description: "ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. Note this is only for the Alertmanager UI, not the gossip communication.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "containers": { - SchemaProps: spec.SchemaProps{ - Description: "Containers allows injecting additional containers. 
This is meant to allow adding an authentication proxy to an Alertmanager pod.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Container"), - }, - }, - }, - }, - }, - "priorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "Priority class assigned to the Pods", - Type: []string{"string"}, - Format: "", - }, - }, - "additionalPeers": { - SchemaProps: spec.SchemaProps{ - Description: "AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_monitoring_v1_AlertmanagerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not included when requesting from the apiserver, only from the Prometheus Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status", - Properties: map[string]spec.Schema{ - "paused": { - SchemaProps: spec.SchemaProps{ - Description: "Represents whether any actions on the underlaying managed objects are being performed. 
Only delete actions will be performed.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "replicas": { - SchemaProps: spec.SchemaProps{ - Description: "Total number of non-terminated pods targeted by this Alertmanager cluster (their labels match the selector).", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "updatedReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "Total number of non-terminated pods targeted by this Alertmanager cluster that have the desired version spec.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "availableReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "Total number of available pods (ready for at least minReadySeconds) targeted by this Alertmanager cluster.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "unavailableReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "Total number of unavailable pods targeted by this Alertmanager cluster.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"paused", "replicas", "updatedReplicas", "availableReplicas", "unavailableReplicas"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_monitoring_v1_BasicAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "BasicAuth allow an endpoint to authenticate over basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints", - Properties: map[string]spec.Schema{ - "username": { - SchemaProps: spec.SchemaProps{ - Description: "The secret that contains the username for authenticate", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "password": { - SchemaProps: spec.SchemaProps{ - Description: "The secret that contains the password for authenticate", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_monitoring_v1_Endpoint(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Endpoint defines a scrapeable endpoint serving Prometheus metrics.", - Properties: map[string]spec.Schema{ - "port": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the service port this endpoint refers to. Mutually exclusive with targetPort.", - Type: []string{"string"}, - Format: "", - }, - }, - "targetPort": { - SchemaProps: spec.SchemaProps{ - Description: "Name or number of the target port of the endpoint. 
Mutually exclusive with port.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP path to scrape for metrics.", - Type: []string{"string"}, - Format: "", - }, - }, - "scheme": { - SchemaProps: spec.SchemaProps{ - Description: "HTTP scheme to use for scraping.", - Type: []string{"string"}, - Format: "", - }, - }, - "params": { - SchemaProps: spec.SchemaProps{ - Description: "Optional HTTP URL parameters", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - "interval": { - SchemaProps: spec.SchemaProps{ - Description: "Interval at which metrics should be scraped", - Type: []string{"string"}, - Format: "", - }, - }, - "scrapeTimeout": { - SchemaProps: spec.SchemaProps{ - Description: "Timeout after which the scrape is ended", - Type: []string{"string"}, - Format: "", - }, - }, - "tlsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "TLS configuration to use when scraping the endpoint", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig"), - }, - }, - "bearerTokenFile": { - SchemaProps: spec.SchemaProps{ - Description: "File to read bearer token for scraping targets.", - Type: []string{"string"}, - Format: "", - }, - }, - "honorLabels": { - SchemaProps: spec.SchemaProps{ - Description: "HonorLabels chooses the metric's labels on collisions with target labels.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "basicAuth": { - SchemaProps: spec.SchemaProps{ - Description: "BasicAuth allow an endpoint to authenticate over basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth"), - }, - }, - "metricRelabelings": { - SchemaProps: spec.SchemaProps{ - Description: "MetricRelabelConfigs to apply to samples before ingestion.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig"), - }, - }, - }, - }, - }, - "relabelings": { - SchemaProps: spec.SchemaProps{ - Description: "RelabelConfigs to apply to samples before ingestion. 
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig"), - }, - }, - }, - }, - }, - "proxyUrl": { - SchemaProps: spec.SchemaProps{ - Description: "ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_monitoring_v1_NamespaceSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NamespaceSelector is a selector for selecting either all namespaces or a list of namespaces.", - Properties: map[string]spec.Schema{ - "any": { - SchemaProps: spec.SchemaProps{ - Description: "Boolean describing whether all namespaces are selected in contrast to a list restricting them.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "matchNames": { - SchemaProps: spec.SchemaProps{ - Description: "List of namespace names.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_monitoring_v1_Prometheus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Prometheus defines a Prometheus deployment.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Specification of the desired behavior of the Prometheus cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Most recent observed status of the Prometheus cluster. Read-only. Not included when requesting from the apiserver, only from the Prometheus Operator API itself. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusStatus"), - }, - }, - }, - Required: []string{"spec"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusSpec", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusStatus"}, - } -} - -func schema_pkg_apis_monitoring_v1_PrometheusList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PrometheusList is a list of Prometheuses.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of Prometheuses", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Prometheus"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Prometheus", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_monitoring_v1_PrometheusRule(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PrometheusRule defines alerting rules for a Prometheus instance", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object’s metadata. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Specification of desired alerting rule definitions for Prometheus.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusRuleSpec"), - }, - }, - }, - Required: []string{"spec"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusRuleSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_monitoring_v1_PrometheusRuleList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PrometheusRuleList is a list of PrometheusRules.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of Rules", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusRule"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.PrometheusRule", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_monitoring_v1_PrometheusRuleSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PrometheusRuleSpec contains specification parameters for a Rule.", - Properties: map[string]spec.Schema{ - "groups": { - SchemaProps: spec.SchemaProps{ - Description: "Content of Prometheus rule file", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RuleGroup"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RuleGroup"}, - } -} - -func schema_pkg_apis_monitoring_v1_PrometheusSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status", - Properties: map[string]spec.Schema{ - "podMetadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata Metadata Labels and Annotations gets propagated to the prometheus pods.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "serviceMonitorSelector": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceMonitors to be selected for target discovery.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "serviceMonitorNamespaceSelector": { - SchemaProps: spec.SchemaProps{ - Description: "Namespaces to be selected for ServiceMonitor discovery. If nil, only check own namespace.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Description: "Version of Prometheus to be deployed.", - Type: []string{"string"}, - Format: "", - }, - }, - "tag": { - SchemaProps: spec.SchemaProps{ - Description: "Tag of Prometheus container image to be deployed. Defaults to the value of `version`. Version is ignored if Tag is set.", - Type: []string{"string"}, - Format: "", - }, - }, - "sha": { - SchemaProps: spec.SchemaProps{ - Description: "SHA of Prometheus container image to be deployed. Defaults to the value of `version`. Similar to a tag, but the SHA explicitly deploys an immutable container image. Version and Tag are ignored if SHA is set.", - Type: []string{"string"}, - Format: "", - }, - }, - "paused": { - SchemaProps: spec.SchemaProps{ - Description: "When a Prometheus deployment is paused, no actions except for deletion will be performed on the underlying objects.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Image if specified has precedence over baseImage, tag and sha combinations. Specifying the version is still necessary to ensure the Prometheus Operator knows what version of Prometheus is being configured.", - Type: []string{"string"}, - Format: "", - }, - }, - "baseImage": { - SchemaProps: spec.SchemaProps{ - Description: "Base image to use for a Prometheus deployment.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullSecrets": { - SchemaProps: spec.SchemaProps{ - Description: "An optional list of references to secrets in the same namespace to use for pulling prometheus and alertmanager images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - }, - }, - "replicas": { - SchemaProps: spec.SchemaProps{ - Description: "Number of instances to deploy for a Prometheus deployment.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "replicaExternalLabelName": { - SchemaProps: spec.SchemaProps{ - Description: "Name of Prometheus external label used to denote replica name. Defaults to the value of `prometheus_replica`.", - Type: []string{"string"}, - Format: "", - }, - }, - "retention": { - SchemaProps: spec.SchemaProps{ - Description: "Time duration Prometheus shall retain data for. 
Default is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years).", - Type: []string{"string"}, - Format: "", - }, - }, - "logLevel": { - SchemaProps: spec.SchemaProps{ - Description: "Log level for Prometheus to be configured with.", - Type: []string{"string"}, - Format: "", - }, - }, - "logFormat": { - SchemaProps: spec.SchemaProps{ - Description: "Log format for Prometheus to be configured with.", - Type: []string{"string"}, - Format: "", - }, - }, - "scrapeInterval": { - SchemaProps: spec.SchemaProps{ - Description: "Interval between consecutive scrapes.", - Type: []string{"string"}, - Format: "", - }, - }, - "evaluationInterval": { - SchemaProps: spec.SchemaProps{ - Description: "Interval between consecutive evaluations.", - Type: []string{"string"}, - Format: "", - }, - }, - "rules": { - SchemaProps: spec.SchemaProps{ - Description: "/--rules.*/ command-line arguments.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Rules"), - }, - }, - "externalLabels": { - SchemaProps: spec.SchemaProps{ - Description: "The labels to add to any time series or alerts when communicating with external systems (federation, remote storage, Alertmanager).", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "enableAdminAPI": { - SchemaProps: spec.SchemaProps{ - Description: "Enable access to prometheus web admin API. Defaults to the value of `false`. WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, shutdown Prometheus, and more. Enabling this should be done with care and the user is advised to add additional authentication authorization via a proxy to ensure only clients authorized to perform these actions can do so. For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis", - Type: []string{"boolean"}, - Format: "", - }, - }, - "externalUrl": { - SchemaProps: spec.SchemaProps{ - Description: "The external URL the Prometheus instances will be available under. This is necessary to generate correct URLs. This is necessary if Prometheus is not served from root of a DNS name.", - Type: []string{"string"}, - Format: "", - }, - }, - "routePrefix": { - SchemaProps: spec.SchemaProps{ - Description: "The route prefix Prometheus registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`.", - Type: []string{"string"}, - Format: "", - }, - }, - "query": { - SchemaProps: spec.SchemaProps{ - Description: "QuerySpec defines the query command line flags when starting Prometheus.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.QuerySpec"), - }, - }, - "storage": { - SchemaProps: spec.SchemaProps{ - Description: "Storage spec to specify how storage shall be used.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec"), - }, - }, - "ruleSelector": { - SchemaProps: spec.SchemaProps{ - Description: "A selector to select which PrometheusRules to mount for loading alerting rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom resources selected by RuleSelector. 
Make sure it does not match any config maps that you do not want to be migrated.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "ruleNamespaceSelector": { - SchemaProps: spec.SchemaProps{ - Description: "Namespaces to be selected for PrometheusRules discovery. If unspecified, only the same namespace as the Prometheus object is in is used.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "alerting": { - SchemaProps: spec.SchemaProps{ - Description: "Define details regarding alerting.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertingSpec"), - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Define resources requests and limits for single Pods.", - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "Define which Nodes the Pods are scheduled on.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run the Prometheus Pods.", - Type: []string{"string"}, - Format: "", - }, - }, - "secrets": { - SchemaProps: spec.SchemaProps{ - Description: "Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The Secrets are mounted into /etc/prometheus/secrets/.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "configMaps": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints.", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "tolerations": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Toleration"), - }, - }, - }, - }, - }, - "remoteWrite": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RemoteWriteSpec"), - }, - }, - }, - }, - }, - "remoteRead": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the remote_read spec. 
This is an experimental feature, it may change in any upcoming release in a breaking way.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RemoteReadSpec"), - }, - }, - }, - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. This defaults to non root user with uid 1000 and gid 2000 for Prometheus >v2.0 and default PodSecurityContext for other versions.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), - }, - }, - "listenLocal": { - SchemaProps: spec.SchemaProps{ - Description: "ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "containers": { - SchemaProps: spec.SchemaProps{ - Description: "Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Container"), - }, - }, - }, - }, - }, - "additionalScrapeConfigs": { - SchemaProps: spec.SchemaProps{ - Description: "AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Prometheus scrape configurations. Scrape configurations specified are appended to the configurations generated by the Prometheus Operator. Job configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible scrape configs are going to break Prometheus after the upgrade.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "additionalAlertRelabelConfigs": { - SchemaProps: spec.SchemaProps{ - Description: "AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing additional Prometheus alert relabel configurations. Alert relabel configurations specified are appended to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel configs are going to break Prometheus after the upgrade.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "additionalAlertManagerConfigs": { - SchemaProps: spec.SchemaProps{ - Description: "AdditionalAlertManagerConfigs allows specifying a key of a Secret containing additional Prometheus AlertManager configurations. AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. 
Job configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "apiserverConfig": { - SchemaProps: spec.SchemaProps{ - Description: "APIServerConfig allows specifying a host and auth methods to access apiserver. If left empty, Prometheus is assumed to run inside of the cluster and will discover API servers automatically and use the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.APIServerConfig"), - }, - }, - "thanos": { - SchemaProps: spec.SchemaProps{ - Description: "Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.\n\nThis section is experimental, it may change significantly without deprecation notice in any release.\n\nThis is experimental and may change significantly without backward compatibility in any release.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosSpec"), - }, - }, - "priorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "Priority class assigned to the Pods", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.APIServerConfig", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.AlertingSpec", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.QuerySpec", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RemoteReadSpec", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RemoteWriteSpec", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Rules", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.StorageSpec", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecretKeySelector", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_monitoring_v1_PrometheusStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not included when requesting from the apiserver, only from the Prometheus Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status", - Properties: map[string]spec.Schema{ - "paused": { - SchemaProps: spec.SchemaProps{ - Description: "Represents whether any actions on the underlaying managed objects are being performed. 
Only delete actions will be performed.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "replicas": { - SchemaProps: spec.SchemaProps{ - Description: "Total number of non-terminated pods targeted by this Prometheus deployment (their labels match the selector).", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "updatedReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "Total number of non-terminated pods targeted by this Prometheus deployment that have the desired version spec.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "availableReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "Total number of available pods (ready for at least minReadySeconds) targeted by this Prometheus deployment.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "unavailableReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "Total number of unavailable pods targeted by this Prometheus deployment.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"paused", "replicas", "updatedReplicas", "availableReplicas", "unavailableReplicas"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_monitoring_v1_QuerySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "QuerySpec defines the query command line flags when starting Prometheus.", - Properties: map[string]spec.Schema{ - "lookbackDelta": { - SchemaProps: spec.SchemaProps{ - Description: "The delta difference allowed for retrieving metrics during expression evaluations.", - Type: []string{"string"}, - Format: "", - }, - }, - "maxConcurrency": { - SchemaProps: spec.SchemaProps{ - Description: "Number of concurrent queries that can be run at once.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "timeout": { - SchemaProps: spec.SchemaProps{ - Description: "Maximum time a query may take before being aborted.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_monitoring_v1_QueueConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "QueueConfig allows the tuning of remote_write queue_config parameters. This object is referenced in the RemoteWriteSpec object.", - Properties: map[string]spec.Schema{ - "capacity": { - SchemaProps: spec.SchemaProps{ - Description: "Capacity is the number of samples to buffer per shard before we start dropping them.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "minShards": { - SchemaProps: spec.SchemaProps{ - Description: "MinShards is the minimum number of shards, i.e. amount of concurrency.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "maxShards": { - SchemaProps: spec.SchemaProps{ - Description: "MaxShards is the maximum number of shards, i.e. 
amount of concurrency.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "maxSamplesPerSend": { - SchemaProps: spec.SchemaProps{ - Description: "MaxSamplesPerSend is the maximum number of samples per send.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "batchSendDeadline": { - SchemaProps: spec.SchemaProps{ - Description: "BatchSendDeadline is the maximum time a sample will wait in buffer.", - Type: []string{"string"}, - Format: "", - }, - }, - "maxRetries": { - SchemaProps: spec.SchemaProps{ - Description: "MaxRetries is the maximum number of times to retry a batch on recoverable errors.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "minBackoff": { - SchemaProps: spec.SchemaProps{ - Description: "MinBackoff is the initial retry delay. Gets doubled for every retry.", - Type: []string{"string"}, - Format: "", - }, - }, - "maxBackoff": { - SchemaProps: spec.SchemaProps{ - Description: "MaxBackoff is the maximum retry delay.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_monitoring_v1_RelabelConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs", - Properties: map[string]spec.Schema{ - "sourceLabels": { - SchemaProps: spec.SchemaProps{ - Description: "The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "separator": { - SchemaProps: spec.SchemaProps{ - Description: "Separator placed between concatenated source label values. default is ';'.", - Type: []string{"string"}, - Format: "", - }, - }, - "targetLabel": { - SchemaProps: spec.SchemaProps{ - Description: "Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available.", - Type: []string{"string"}, - Format: "", - }, - }, - "regex": { - SchemaProps: spec.SchemaProps{ - Description: "Regular expression against which the extracted value is matched. defailt is '(.*)'", - Type: []string{"string"}, - Format: "", - }, - }, - "modulus": { - SchemaProps: spec.SchemaProps{ - Description: "Modulus to take of the hash of the source label values.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "replacement": { - SchemaProps: spec.SchemaProps{ - Description: "Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1'", - Type: []string{"string"}, - Format: "", - }, - }, - "action": { - SchemaProps: spec.SchemaProps{ - Description: "Action to perform based on regex matching. 
Default is 'replace'", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_monitoring_v1_RemoteReadSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RemoteReadSpec defines the remote_read configuration for prometheus.", - Properties: map[string]spec.Schema{ - "url": { - SchemaProps: spec.SchemaProps{ - Description: "The URL of the endpoint to send samples to.", - Type: []string{"string"}, - Format: "", - }, - }, - "requiredMatchers": { - SchemaProps: spec.SchemaProps{ - Description: "An optional list of equality matchers which have to be present in a selector to query the remote read endpoint.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "remoteTimeout": { - SchemaProps: spec.SchemaProps{ - Description: "Timeout for requests to the remote read endpoint.", - Type: []string{"string"}, - Format: "", - }, - }, - "readRecent": { - SchemaProps: spec.SchemaProps{ - Description: "Whether reads should be made for queries for time ranges that the local storage should have complete data for.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "basicAuth": { - SchemaProps: spec.SchemaProps{ - Description: "BasicAuth for the URL.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth"), - }, - }, - "bearerToken": { - SchemaProps: spec.SchemaProps{ - Description: "bearer token for remote read.", - Type: []string{"string"}, - Format: "", - }, - }, - "bearerTokenFile": { - SchemaProps: spec.SchemaProps{ - Description: "File to read bearer token for remote read.", - Type: []string{"string"}, - Format: "", - }, - }, - "tlsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "TLS Config to use for remote read.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig"), - }, - }, - "proxyUrl": { - SchemaProps: spec.SchemaProps{ - Description: "Optional ProxyURL", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"url"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig"}, - } -} - -func schema_pkg_apis_monitoring_v1_RemoteWriteSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RemoteWriteSpec defines the remote_write configuration for prometheus.", - Properties: map[string]spec.Schema{ - "url": { - SchemaProps: spec.SchemaProps{ - Description: "The URL of the endpoint to send samples to.", - Type: []string{"string"}, - Format: "", - }, - }, - "remoteTimeout": { - SchemaProps: spec.SchemaProps{ - Description: "Timeout for requests to the remote write endpoint.", - Type: []string{"string"}, - Format: "", - }, - }, - "writeRelabelConfigs": { - SchemaProps: spec.SchemaProps{ - Description: "The list of remote write relabel configurations.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig"), - }, - }, - }, - }, - }, - "basicAuth": { - SchemaProps: spec.SchemaProps{ - Description: 
"BasicAuth for the URL.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth"), - }, - }, - "bearerToken": { - SchemaProps: spec.SchemaProps{ - Description: "File to read bearer token for remote write.", - Type: []string{"string"}, - Format: "", - }, - }, - "bearerTokenFile": { - SchemaProps: spec.SchemaProps{ - Description: "File to read bearer token for remote write.", - Type: []string{"string"}, - Format: "", - }, - }, - "tlsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "TLS Config to use for remote write.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig"), - }, - }, - "proxyUrl": { - SchemaProps: spec.SchemaProps{ - Description: "Optional ProxyURL", - Type: []string{"string"}, - Format: "", - }, - }, - "queueConfig": { - SchemaProps: spec.SchemaProps{ - Description: "QueueConfig allows tuning of the remote write queue parameters.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.QueueConfig"), - }, - }, - }, - Required: []string{"url"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.BasicAuth", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.QueueConfig", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RelabelConfig", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.TLSConfig"}, - } -} - -func schema_pkg_apis_monitoring_v1_Rule(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Rule describes an alerting or recording rule.", - Properties: map[string]spec.Schema{ - "record": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "alert": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "expr": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "for": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "labels": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "annotations": { - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"expr"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_pkg_apis_monitoring_v1_RuleGroup(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RuleGroup is a list of sequentially evaluated recording and alerting rules.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "interval": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "rules": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Rule"), - }, - }, - }, - }, - }, - }, - Required: 
[]string{"name", "rules"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Rule"}, - } -} - -func schema_pkg_apis_monitoring_v1_Rules(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "/--rules.*/ command-line arguments", - Properties: map[string]spec.Schema{ - "alert": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RulesAlert"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.RulesAlert"}, - } -} - -func schema_pkg_apis_monitoring_v1_RulesAlert(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "/--rules.alert.*/ command-line arguments", - Properties: map[string]spec.Schema{ - "forOutageTolerance": { - SchemaProps: spec.SchemaProps{ - Description: "Max time to tolerate prometheus outage for restoring 'for' state of alert.", - Type: []string{"string"}, - Format: "", - }, - }, - "forGracePeriod": { - SchemaProps: spec.SchemaProps{ - Description: "Minimum duration between alert and restored 'for' state. This is maintained only for alerts with configured 'for' time greater than grace period.", - Type: []string{"string"}, - Format: "", - }, - }, - "resendDelay": { - SchemaProps: spec.SchemaProps{ - Description: "Minimum amount of time to wait before resending an alert to Alertmanager.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_monitoring_v1_ServiceMonitor(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceMonitor defines monitoring for a set of services.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Specification of desired Service selection for target discrovery by Prometheus.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitorSpec"), - }, - }, - }, - Required: []string{"spec"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitorSpec"}, - } -} - -func schema_pkg_apis_monitoring_v1_ServiceMonitorList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceMonitorList is a list of ServiceMonitors.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of ServiceMonitors", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitor"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ServiceMonitor", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_pkg_apis_monitoring_v1_ServiceMonitorSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceMonitorSpec contains specification parameters for a ServiceMonitor.", - Properties: map[string]spec.Schema{ - "jobLabel": { - SchemaProps: spec.SchemaProps{ - Description: "The label to use to retrieve the job name from.", - Type: []string{"string"}, - Format: "", - }, - }, - "targetLabels": { - SchemaProps: spec.SchemaProps{ - Description: "TargetLabels transfers labels on the Kubernetes Service onto the target.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "podTargetLabels": { - SchemaProps: spec.SchemaProps{ - Description: "PodTargetLabels transfers labels on the Kubernetes Pod onto the target.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: 
[]string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "endpoints": { - SchemaProps: spec.SchemaProps{ - Description: "A list of endpoints allowed as part of this ServiceMonitor.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Endpoint"), - }, - }, - }, - }, - }, - "selector": { - SchemaProps: spec.SchemaProps{ - Description: "Selector to select Endpoints objects.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "namespaceSelector": { - SchemaProps: spec.SchemaProps{ - Description: "Selector to select which namespaces the Endpoints objects are discovered from.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.NamespaceSelector"), - }, - }, - "sampleLimit": { - SchemaProps: spec.SchemaProps{ - Description: "SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - Required: []string{"endpoints", "selector"}, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.Endpoint", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.NamespaceSelector", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, - } -} - -func schema_pkg_apis_monitoring_v1_StorageSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "StorageSpec defines the configured storage for a group Prometheus servers. If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used.", - Properties: map[string]spec.Schema{ - "emptyDir": { - SchemaProps: spec.SchemaProps{ - Description: "EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. 
More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir", - Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), - }, - }, - "volumeClaimTemplate": { - SchemaProps: spec.SchemaProps{ - Description: "A PVC spec to be used by the Prometheus StatefulSets.", - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim"}, - } -} - -func schema_pkg_apis_monitoring_v1_TLSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TLSConfig specifies TLS configuration parameters.", - Properties: map[string]spec.Schema{ - "caFile": { - SchemaProps: spec.SchemaProps{ - Description: "The CA cert to use for the targets.", - Type: []string{"string"}, - Format: "", - }, - }, - "certFile": { - SchemaProps: spec.SchemaProps{ - Description: "The client cert file for the targets.", - Type: []string{"string"}, - Format: "", - }, - }, - "keyFile": { - SchemaProps: spec.SchemaProps{ - Description: "The client key file for the targets.", - Type: []string{"string"}, - Format: "", - }, - }, - "serverName": { - SchemaProps: spec.SchemaProps{ - Description: "Used to verify the hostname for the targets.", - Type: []string{"string"}, - Format: "", - }, - }, - "insecureSkipVerify": { - SchemaProps: spec.SchemaProps{ - Description: "Disable target certificate validation.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_monitoring_v1_ThanosGCSSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Deprecated: ThanosGCSSpec should be configured with an ObjectStorageConfig secret starting with Thanos v0.2.0. ThanosGCSSpec will be removed.", - Properties: map[string]spec.Schema{ - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "Google Cloud Storage bucket name for stored blocks. If empty it won't store any block inside Google Cloud Storage.", - Type: []string{"string"}, - Format: "", - }, - }, - "credentials": { - SchemaProps: spec.SchemaProps{ - Description: "Secret to access our Bucket.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_monitoring_v1_ThanosS3Spec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Deprecated: ThanosS3Spec should be configured with an ObjectStorageConfig secret starting with Thanos v0.2.0. 
ThanosS3Spec will be removed.", - Properties: map[string]spec.Schema{ - "bucket": { - SchemaProps: spec.SchemaProps{ - Description: "S3-Compatible API bucket name for stored blocks.", - Type: []string{"string"}, - Format: "", - }, - }, - "endpoint": { - SchemaProps: spec.SchemaProps{ - Description: "S3-Compatible API endpoint for stored blocks.", - Type: []string{"string"}, - Format: "", - }, - }, - "accessKey": { - SchemaProps: spec.SchemaProps{ - Description: "AccessKey for an S3-Compatible API.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "secretKey": { - SchemaProps: spec.SchemaProps{ - Description: "SecretKey for an S3-Compatible API.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "insecure": { - SchemaProps: spec.SchemaProps{ - Description: "Whether to use an insecure connection with an S3-Compatible API.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "signatureVersion2": { - SchemaProps: spec.SchemaProps{ - Description: "Whether to use S3 Signature Version 2; otherwise Signature Version 4 will be used.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "encryptsse": { - SchemaProps: spec.SchemaProps{ - Description: "Whether to use Server Side Encryption", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_pkg_apis_monitoring_v1_ThanosSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ThanosSpec defines parameters for a Prometheus server within a Thanos deployment.", - Properties: map[string]spec.Schema{ - "peers": { - SchemaProps: spec.SchemaProps{ - Description: "Peers is a DNS name for Thanos to discover peers through.", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Image if specified has precedence over baseImage, tag and sha combinations. Specifying the version is still necessary to ensure the Prometheus Operator knows what version of Thanos is being configured.", - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Description: "Version describes the version of Thanos to use.", - Type: []string{"string"}, - Format: "", - }, - }, - "tag": { - SchemaProps: spec.SchemaProps{ - Description: "Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`. Version is ignored if Tag is set.", - Type: []string{"string"}, - Format: "", - }, - }, - "sha": { - SchemaProps: spec.SchemaProps{ - Description: "SHA of Thanos container image to be deployed. Defaults to the value of `version`. Similar to a tag, but the SHA explicitly deploys an immutable container image. Version and Tag are ignored if SHA is set.", - Type: []string{"string"}, - Format: "", - }, - }, - "baseImage": { - SchemaProps: spec.SchemaProps{ - Description: "Thanos base image if other than default.", - Type: []string{"string"}, - Format: "", - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Resources defines the resource requirements for the Thanos sidecar. If not provided, no requests/limits will be set", - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "gcs": { - SchemaProps: spec.SchemaProps{ - Description: "Deprecated: GCS should be configured with an ObjectStorageConfig secret starting with Thanos v0.2.0. 
This field will be removed.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosGCSSpec"), - }, - }, - "s3": { - SchemaProps: spec.SchemaProps{ - Description: "Deprecated: S3 should be configured with an ObjectStorageConfig secret starting with Thanos v0.2.0. This field will be removed.", - Ref: ref("github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosS3Spec"), - }, - }, - "objectStorageConfig": { - SchemaProps: spec.SchemaProps{ - Description: "ObjectStorageConfig configures object storage in Thanos.", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "grpcAdvertiseAddress": { - SchemaProps: spec.SchemaProps{ - Description: "Explicit (external) host:port address to advertise for gRPC StoreAPI in gossip cluster. If empty, 'grpc-address' will be used.", - Type: []string{"string"}, - Format: "", - }, - }, - "clusterAdvertiseAddress": { - SchemaProps: spec.SchemaProps{ - Description: "Explicit (external) ip:port address to advertise for gossip in gossip cluster. Used internally for membership only.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosGCSSpec", "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1.ThanosS3Spec", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_k8sio_api_core_v1_AWSElasticBlockStoreVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "volumeID": { - SchemaProps: spec.SchemaProps{ - Description: "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - Type: []string{"string"}, - Format: "", - }, - }, - "partition": { - SchemaProps: spec.SchemaProps{ - Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"volumeID"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Affinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Affinity is a group of affinity scheduling rules.", - Properties: map[string]spec.Schema{ - "nodeAffinity": { - SchemaProps: spec.SchemaProps{ - Description: "Describes node affinity scheduling rules for the pod.", - Ref: ref("k8s.io/api/core/v1.NodeAffinity"), - }, - }, - "podAffinity": { - SchemaProps: spec.SchemaProps{ - Description: "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", - Ref: ref("k8s.io/api/core/v1.PodAffinity"), - }, - }, - "podAntiAffinity": { - SchemaProps: spec.SchemaProps{ - Description: "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", - Ref: ref("k8s.io/api/core/v1.PodAntiAffinity"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeAffinity", "k8s.io/api/core/v1.PodAffinity", "k8s.io/api/core/v1.PodAntiAffinity"}, - } -} - -func schema_k8sio_api_core_v1_AttachedVolume(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AttachedVolume describes a volume attached to a node", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the attached volume", - Type: []string{"string"}, - Format: "", - }, - }, - "devicePath": { - SchemaProps: spec.SchemaProps{ - Description: "DevicePath represents the device path where the volume should be available", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "devicePath"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_AvoidPods(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.", - Properties: map[string]spec.Schema{ - "preferAvoidPods": { - SchemaProps: spec.SchemaProps{ - Description: "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. 
Size of the slice is unspecified.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PreferAvoidPodsEntry"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PreferAvoidPodsEntry"}, - } -} - -func schema_k8sio_api_core_v1_AzureDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - Properties: map[string]spec.Schema{ - "diskName": { - SchemaProps: spec.SchemaProps{ - Description: "The Name of the data disk in the blob storage", - Type: []string{"string"}, - Format: "", - }, - }, - "diskURI": { - SchemaProps: spec.SchemaProps{ - Description: "The URI the data disk in the blob storage", - Type: []string{"string"}, - Format: "", - }, - }, - "cachingMode": { - SchemaProps: spec.SchemaProps{ - Description: "Host Caching mode: None, Read Only, Read Write.", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"diskName", "diskURI"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_AzureFilePersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - Properties: map[string]spec.Schema{ - "secretName": { - SchemaProps: spec.SchemaProps{ - Description: "the name of secret that contains Azure Storage Account Name and Key", - Type: []string{"string"}, - Format: "", - }, - }, - "shareName": { - SchemaProps: spec.SchemaProps{ - Description: "Share Name", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "secretNamespace": { - SchemaProps: spec.SchemaProps{ - Description: "the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"secretName", "shareName"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_AzureFileVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - Properties: map[string]spec.Schema{ - "secretName": { - SchemaProps: spec.SchemaProps{ - Description: "the name of secret that contains Azure Storage Account Name and Key", - Type: []string{"string"}, - Format: "", - }, - }, - "shareName": { - SchemaProps: spec.SchemaProps{ - Description: "Share Name", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"secretName", "shareName"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Binding(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "target": { - SchemaProps: spec.SchemaProps{ - Description: "The target object that you want to bind to the standard object.", - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - }, - Required: []string{"target"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_CSIPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents storage that is managed by an external CSI volume driver (Beta feature)", - Properties: map[string]spec.Schema{ - "driver": { - SchemaProps: spec.SchemaProps{ - Description: "Driver is the name of the driver to use for this volume. Required.", - Type: []string{"string"}, - Format: "", - }, - }, - "volumeHandle": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", - Type: []string{"boolean"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", - Type: []string{"string"}, - Format: "", - }, - }, - "volumeAttributes": { - SchemaProps: spec.SchemaProps{ - Description: "Attributes of the volume to publish.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "controllerPublishSecretRef": { - SchemaProps: spec.SchemaProps{ - Description: "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - "nodeStageSecretRef": { - SchemaProps: spec.SchemaProps{ - Description: "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - "nodePublishSecretRef": { - SchemaProps: spec.SchemaProps{ - Description: "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - }, - Required: []string{"driver", "volumeHandle"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretReference"}, - } -} - -func schema_k8sio_api_core_v1_Capabilities(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Adds and removes POSIX capabilities from running containers.", - Properties: map[string]spec.Schema{ - "add": { - SchemaProps: spec.SchemaProps{ - Description: "Added capabilities", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "drop": { - SchemaProps: spec.SchemaProps{ - Description: "Removed capabilities", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_CephFSPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - Properties: map[string]spec.Schema{ - "monitors": { - SchemaProps: spec.SchemaProps{ - Description: "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - Type: []string{"string"}, - Format: "", - }, - }, - "user": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "secretFile": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"monitors"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretReference"}, - } -} - -func schema_k8sio_api_core_v1_CephFSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - Properties: map[string]spec.Schema{ - "monitors": { - SchemaProps: spec.SchemaProps{ - Description: "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - Type: []string{"string"}, - Format: "", - }, - }, - "user": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "secretFile": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"monitors"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_CinderPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "volumeID": { - SchemaProps: spec.SchemaProps{ - Description: "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Type: []string{"boolean"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: points to a secret object containing parameters used to connect to OpenStack.", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - }, - Required: []string{"volumeID"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretReference"}, - } -} - -func schema_k8sio_api_core_v1_CinderVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "volumeID": { - SchemaProps: spec.SchemaProps{ - Description: "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Type: []string{"boolean"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: points to a secret object containing parameters used to connect to OpenStack.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - Required: []string{"volumeID"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_ClientIPConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClientIPConfig represents the configurations of Client IP based session affinity.", - Properties: map[string]spec.Schema{ - "timeoutSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". 
Default value is 10800(for 3 hours).", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ComponentCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Information about the condition of a component.", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type of condition for a component. Valid value: \"Healthy\"", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Message about the condition for a component. For example, information about a health check.", - Type: []string{"string"}, - Format: "", - }, - }, - "error": { - SchemaProps: spec.SchemaProps{ - Description: "Condition error code for a component. For example, a health check error code.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "status"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ComponentStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "conditions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of component conditions observed", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ComponentCondition"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ComponentCondition", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_ComponentStatusList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Status of all the conditions for the component as a list of ComponentStatus objects.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of ComponentStatus objects.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ComponentStatus"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ComponentStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_ConfigMap(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ConfigMap holds configuration data for pods to consume.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "binaryData": { - SchemaProps: spec.SchemaProps{ - Description: "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "byte", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_ConfigMapEnvSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Specify whether the ConfigMap must be defined", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ConfigMapKeySelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Selects a key from a ConfigMap.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The key to select.", - Type: []string{"string"}, - Format: "", - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Specify whether the ConfigMap or it's key must be defined", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"key"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ConfigMapList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ConfigMapList is a resource containing a list of ConfigMap objects.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is the list of ConfigMaps.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ConfigMap"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMap", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_ConfigMapNodeConfigSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.", - Properties: map[string]spec.Schema{ - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", - Type: []string{"string"}, - Format: "", - }, - }, - "uid": { - SchemaProps: spec.SchemaProps{ - Description: "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", - Type: []string{"string"}, - Format: "", - }, - }, - "resourceVersion": { - SchemaProps: spec.SchemaProps{ - Description: "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. 
This field is forbidden in Node.Spec, and required in Node.Status.", - Type: []string{"string"}, - Format: "", - }, - }, - "kubeletConfigKey": { - SchemaProps: spec.SchemaProps{ - Description: "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"namespace", "name", "kubeletConfigKey"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ConfigMapProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.KeyToPath"), - }, - }, - }, - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Specify whether the ConfigMap or it's keys must be defined", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.KeyToPath"}, - } -} - -func schema_k8sio_api_core_v1_ConfigMapVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.KeyToPath"), - }, - }, - }, - }, - }, - "defaultMode": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Specify whether the ConfigMap or it's keys must be defined", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.KeyToPath"}, - } -} - -func schema_k8sio_api_core_v1_Container(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A single application container that you want to run within a pod.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - Type: []string{"string"}, - Format: "", - }, - }, - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "args": { - SchemaProps: spec.SchemaProps{ - Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "workingDir": { - SchemaProps: spec.SchemaProps{ - Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "ports": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []string{ - "containerPort", - "protocol", - }, - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ContainerPort"), - }, - }, - }, - }, - }, - "envFrom": { - SchemaProps: spec.SchemaProps{ - Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.EnvFromSource"), - }, - }, - }, - }, - }, - "env": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of environment variables to set in the container. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.EnvVar"), - }, - }, - }, - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeMounts": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.VolumeMount"), - }, - }, - }, - }, - }, - "volumeDevices": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "devicePath", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "volumeDevices is the list of block devices to be used by the container. This is a beta feature.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.VolumeDevice"), - }, - }, - }, - }, - }, - "livenessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "readinessProbe": { - SchemaProps: spec.SchemaProps{ - Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Ref: ref("k8s.io/api/core/v1.Probe"), - }, - }, - "lifecycle": { - SchemaProps: spec.SchemaProps{ - Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", - Ref: ref("k8s.io/api/core/v1.Lifecycle"), - }, - }, - "terminationMessagePath": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "terminationMessagePolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "imagePullPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - Type: []string{"string"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "Security options the pod should run with. 
More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", - Ref: ref("k8s.io/api/core/v1.SecurityContext"), - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdinOnce": { - SchemaProps: spec.SchemaProps{ - Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, - } -} - -func schema_k8sio_api_core_v1_ContainerImage(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Describe a container image", - Properties: map[string]spec.Schema{ - "names": { - SchemaProps: spec.SchemaProps{ - Description: "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "sizeBytes": { - SchemaProps: spec.SchemaProps{ - Description: "The size of the image in bytes.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - Required: []string{"names"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ContainerPort(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContainerPort represents a network port in a single container.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", - Type: []string{"string"}, - Format: "", - }, - }, - "hostPort": { - SchemaProps: spec.SchemaProps{ - Description: "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. 
Most containers do not need this.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "containerPort": { - SchemaProps: spec.SchemaProps{ - Description: "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "protocol": { - SchemaProps: spec.SchemaProps{ - Description: "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".", - Type: []string{"string"}, - Format: "", - }, - }, - "hostIP": { - SchemaProps: spec.SchemaProps{ - Description: "What host IP to bind the external port to.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"containerPort"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ContainerState(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", - Properties: map[string]spec.Schema{ - "waiting": { - SchemaProps: spec.SchemaProps{ - Description: "Details about a waiting container", - Ref: ref("k8s.io/api/core/v1.ContainerStateWaiting"), - }, - }, - "running": { - SchemaProps: spec.SchemaProps{ - Description: "Details about a running container", - Ref: ref("k8s.io/api/core/v1.ContainerStateRunning"), - }, - }, - "terminated": { - SchemaProps: spec.SchemaProps{ - Description: "Details about a terminated container", - Ref: ref("k8s.io/api/core/v1.ContainerStateTerminated"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerStateRunning", "k8s.io/api/core/v1.ContainerStateTerminated", "k8s.io/api/core/v1.ContainerStateWaiting"}, - } -} - -func schema_k8sio_api_core_v1_ContainerStateRunning(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContainerStateRunning is a running state of a container.", - Properties: map[string]spec.Schema{ - "startedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which the container was last (re-)started", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_ContainerStateTerminated(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContainerStateTerminated is a terminated state of a container.", - Properties: map[string]spec.Schema{ - "exitCode": { - SchemaProps: spec.SchemaProps{ - Description: "Exit status from the last termination of the container", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "signal": { - SchemaProps: spec.SchemaProps{ - Description: "Signal from the last termination of the container", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "(brief) reason from the last termination of the container", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Message regarding the last termination of the container", - Type: []string{"string"}, - Format: "", - }, - }, - "startedAt": { - SchemaProps: spec.SchemaProps{ - 
Description: "Time at which previous execution of the container started", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "finishedAt": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which the container last terminated", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "containerID": { - SchemaProps: spec.SchemaProps{ - Description: "Container's ID in the format 'docker://'", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"exitCode"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_ContainerStateWaiting(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContainerStateWaiting is a waiting state of a container.", - Properties: map[string]spec.Schema{ - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "(brief) reason the container is not yet running.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Message regarding why the container is not yet running.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ContainerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ContainerStatus contains details for the current status of this container.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", - Type: []string{"string"}, - Format: "", - }, - }, - "state": { - SchemaProps: spec.SchemaProps{ - Description: "Details about the container's current condition.", - Ref: ref("k8s.io/api/core/v1.ContainerState"), - }, - }, - "lastState": { - SchemaProps: spec.SchemaProps{ - Description: "Details about the container's last termination condition.", - Ref: ref("k8s.io/api/core/v1.ContainerState"), - }, - }, - "ready": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies whether the container has passed its readiness probe.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "restartCount": { - SchemaProps: spec.SchemaProps{ - Description: "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "The image the container is running. 
More info: https://kubernetes.io/docs/concepts/containers/images", - Type: []string{"string"}, - Format: "", - }, - }, - "imageID": { - SchemaProps: spec.SchemaProps{ - Description: "ImageID of the container's image.", - Type: []string{"string"}, - Format: "", - }, - }, - "containerID": { - SchemaProps: spec.SchemaProps{ - Description: "Container's ID in the format 'docker://'.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "ready", "restartCount", "image", "imageID"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerState"}, - } -} - -func schema_k8sio_api_core_v1_DaemonEndpoint(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DaemonEndpoint contains information about a single Daemon endpoint.", - Properties: map[string]spec.Schema{ - "Port": { - SchemaProps: spec.SchemaProps{ - Description: "Port number of the given endpoint.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"Port"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_DownwardAPIProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", - Properties: map[string]spec.Schema{ - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is a list of DownwardAPIVolume file", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeFile"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.DownwardAPIVolumeFile"}, - } -} - -func schema_k8sio_api_core_v1_DownwardAPIVolumeFile(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DownwardAPIVolumeFile represents information to create the file containing the pod field", - Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", - Type: []string{"string"}, - Format: "", - }, - }, - "fieldRef": { - SchemaProps: spec.SchemaProps{ - Description: "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", - Ref: ref("k8s.io/api/core/v1.ObjectFieldSelector"), - }, - }, - "resourceFieldRef": { - SchemaProps: spec.SchemaProps{ - Description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", - Ref: ref("k8s.io/api/core/v1.ResourceFieldSelector"), - }, - }, - "mode": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"path"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ObjectFieldSelector", "k8s.io/api/core/v1.ResourceFieldSelector"}, - } -} - -func schema_k8sio_api_core_v1_DownwardAPIVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is a list of downward API volume file", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeFile"), - }, - }, - }, - }, - }, - "defaultMode": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.DownwardAPIVolumeFile"}, - } -} - -func schema_k8sio_api_core_v1_EmptyDirVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "medium": { - SchemaProps: spec.SchemaProps{ - Description: "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - Type: []string{"string"}, - Format: "", - }, - }, - "sizeLimit": { - SchemaProps: spec.SchemaProps{ - Description: "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_EndpointAddress(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EndpointAddress is a tuple that describes single IP address.", - Properties: map[string]spec.Schema{ - "ip": { - SchemaProps: spec.SchemaProps{ - Description: "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. 
Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", - Type: []string{"string"}, - Format: "", - }, - }, - "hostname": { - SchemaProps: spec.SchemaProps{ - Description: "The Hostname of this endpoint", - Type: []string{"string"}, - Format: "", - }, - }, - "nodeName": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", - Type: []string{"string"}, - Format: "", - }, - }, - "targetRef": { - SchemaProps: spec.SchemaProps{ - Description: "Reference to object providing the endpoint.", - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - }, - Required: []string{"ip"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_EndpointPort(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EndpointPort is a tuple that describes a single port.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.", - Type: []string{"string"}, - Format: "", - }, - }, - "port": { - SchemaProps: spec.SchemaProps{ - Description: "The port number of the endpoint.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "protocol": { - SchemaProps: spec.SchemaProps{ - Description: "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"port"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_EndpointSubset(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", - Properties: map[string]spec.Schema{ - "addresses": { - SchemaProps: spec.SchemaProps{ - Description: "IP addresses which offer the related ports that are marked as ready. 
These endpoints should be considered safe for load balancers and clients to utilize.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.EndpointAddress"), - }, - }, - }, - }, - }, - "notReadyAddresses": { - SchemaProps: spec.SchemaProps{ - Description: "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.EndpointAddress"), - }, - }, - }, - }, - }, - "ports": { - SchemaProps: spec.SchemaProps{ - Description: "Port numbers available on the related IP addresses.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.EndpointPort"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.EndpointAddress", "k8s.io/api/core/v1.EndpointPort"}, - } -} - -func schema_k8sio_api_core_v1_Endpoints(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "subsets": { - SchemaProps: spec.SchemaProps{ - Description: "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. 
Sets of addresses and ports that comprise a service.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.EndpointSubset"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.EndpointSubset", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_EndpointsList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EndpointsList is a list of endpoints.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of endpoints.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Endpoints"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Endpoints", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_EnvFromSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EnvFromSource represents the source of a set of ConfigMaps", - Properties: map[string]spec.Schema{ - "prefix": { - SchemaProps: spec.SchemaProps{ - Description: "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", - Type: []string{"string"}, - Format: "", - }, - }, - "configMapRef": { - SchemaProps: spec.SchemaProps{ - Description: "The ConfigMap to select from", - Ref: ref("k8s.io/api/core/v1.ConfigMapEnvSource"), - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "The Secret to select from", - Ref: ref("k8s.io/api/core/v1.SecretEnvSource"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapEnvSource", "k8s.io/api/core/v1.SecretEnvSource"}, - } -} - -func schema_k8sio_api_core_v1_EnvVar(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EnvVar represents an environment variable present in a Container.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the environment variable. 
Must be a C_IDENTIFIER.", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", - Type: []string{"string"}, - Format: "", - }, - }, - "valueFrom": { - SchemaProps: spec.SchemaProps{ - Description: "Source for the environment variable's value. Cannot be used if value is not empty.", - Ref: ref("k8s.io/api/core/v1.EnvVarSource"), - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.EnvVarSource"}, - } -} - -func schema_k8sio_api_core_v1_EnvVarSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EnvVarSource represents a source for the value of an EnvVar.", - Properties: map[string]spec.Schema{ - "fieldRef": { - SchemaProps: spec.SchemaProps{ - Description: "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.", - Ref: ref("k8s.io/api/core/v1.ObjectFieldSelector"), - }, - }, - "resourceFieldRef": { - SchemaProps: spec.SchemaProps{ - Description: "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", - Ref: ref("k8s.io/api/core/v1.ResourceFieldSelector"), - }, - }, - "configMapKeyRef": { - SchemaProps: spec.SchemaProps{ - Description: "Selects a key of a ConfigMap.", - Ref: ref("k8s.io/api/core/v1.ConfigMapKeySelector"), - }, - }, - "secretKeyRef": { - SchemaProps: spec.SchemaProps{ - Description: "Selects a key of a secret in the pod's namespace", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.ObjectFieldSelector", "k8s.io/api/core/v1.ResourceFieldSelector", "k8s.io/api/core/v1.SecretKeySelector"}, - } -} - -func schema_k8sio_api_core_v1_Event(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Event is a report of an event somewhere in the cluster.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "involvedObject": { - SchemaProps: spec.SchemaProps{ - Description: "The object that this event is about.", - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human-readable description of the status of this operation.", - Type: []string{"string"}, - Format: "", - }, - }, - "source": { - SchemaProps: spec.SchemaProps{ - Description: "The component reporting this event. Should be a short machine understandable string.", - Ref: ref("k8s.io/api/core/v1.EventSource"), - }, - }, - "firstTimestamp": { - SchemaProps: spec.SchemaProps{ - Description: "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "lastTimestamp": { - SchemaProps: spec.SchemaProps{ - Description: "The time at which the most recent occurrence of this event was recorded.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "count": { - SchemaProps: spec.SchemaProps{ - Description: "The number of times this event has occurred.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type of this event (Normal, Warning), new types could be added in the future", - Type: []string{"string"}, - Format: "", - }, - }, - "eventTime": { - SchemaProps: spec.SchemaProps{ - Description: "Time when this Event was first observed.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), - }, - }, - "series": { - SchemaProps: spec.SchemaProps{ - Description: "Data about the Event series this event represents or nil if it's a singleton Event.", - Ref: ref("k8s.io/api/core/v1.EventSeries"), - }, - }, - "action": { - SchemaProps: spec.SchemaProps{ - Description: "What action was taken/failed regarding to the Regarding object.", - Type: []string{"string"}, - Format: "", - }, - }, - "related": { - SchemaProps: spec.SchemaProps{ - Description: "Optional secondary object for more complex actions.", - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - "reportingComponent": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", - Type: []string{"string"}, - Format: "", - }, - }, - "reportingInstance": { - SchemaProps: spec.SchemaProps{ - Description: "ID of the controller instance, e.g. 
`kubelet-xyzf`.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"metadata", "involvedObject"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.EventSeries", "k8s.io/api/core/v1.EventSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_EventList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EventList is a list of events.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of events", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Event"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Event", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_EventSeries(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EventSeries contain information on series of events, i.e. 
thing that was/is happening continuously for some time.", - Properties: map[string]spec.Schema{ - "count": { - SchemaProps: spec.SchemaProps{ - Description: "Number of occurrences in this series up to the last heartbeat time", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "lastObservedTime": { - SchemaProps: spec.SchemaProps{ - Description: "Time of the last occurrence observed", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"), - }, - }, - "state": { - SchemaProps: spec.SchemaProps{ - Description: "State of this Series: Ongoing or Finished", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime"}, - } -} - -func schema_k8sio_api_core_v1_EventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "EventSource contains information for an event.", - Properties: map[string]spec.Schema{ - "component": { - SchemaProps: spec.SchemaProps{ - Description: "Component from which the event is generated.", - Type: []string{"string"}, - Format: "", - }, - }, - "host": { - SchemaProps: spec.SchemaProps{ - Description: "Node name on which the event is generated.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ExecAction(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ExecAction describes a \"run in container\" action.", - Properties: map[string]spec.Schema{ - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_FCVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "targetWWNs": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: FC target worldwide names (WWNs)", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "lun": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: FC target lun number", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "wwids": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_FlexPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", - Properties: map[string]spec.Schema{ - "driver": { - SchemaProps: spec.SchemaProps{ - Description: "Driver is the name of the driver to use for this volume.", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", - Type: []string{"string"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "options": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Extra command options if any.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"driver"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretReference"}, - } -} - -func schema_k8sio_api_core_v1_FlexVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - Properties: map[string]spec.Schema{ - "driver": { - SchemaProps: spec.SchemaProps{ - Description: "Driver is the name of the driver to use for this volume.", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
The default filesystem depends on FlexVolume script.", - Type: []string{"string"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "options": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Extra command options if any.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"driver"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_FlockerVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", - Properties: map[string]spec.Schema{ - "datasetName": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", - Type: []string{"string"}, - Format: "", - }, - }, - "datasetUUID": { - SchemaProps: spec.SchemaProps{ - Description: "UUID of the dataset. This is unique identifier of a Flocker dataset", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_GCEPersistentDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "pdName": { - SchemaProps: spec.SchemaProps{ - Description: "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - Type: []string{"string"}, - Format: "", - }, - }, - "partition": { - SchemaProps: spec.SchemaProps{ - Description: "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. 
Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"pdName"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_GitRepoVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", - Properties: map[string]spec.Schema{ - "repository": { - SchemaProps: spec.SchemaProps{ - Description: "Repository URL", - Type: []string{"string"}, - Format: "", - }, - }, - "revision": { - SchemaProps: spec.SchemaProps{ - Description: "Commit hash for the specified revision.", - Type: []string{"string"}, - Format: "", - }, - }, - "directory": { - SchemaProps: spec.SchemaProps{ - Description: "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"repository"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_GlusterfsPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - Properties: map[string]spec.Schema{ - "endpoints": { - SchemaProps: spec.SchemaProps{ - Description: "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Type: []string{"boolean"}, - Format: "", - }, - }, - "endpointsNamespace": { - SchemaProps: spec.SchemaProps{ - Description: "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. 
More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"endpoints", "path"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_GlusterfsVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - Properties: map[string]spec.Schema{ - "endpoints": { - SchemaProps: spec.SchemaProps{ - Description: "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"endpoints", "path"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_HTTPGetAction(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HTTPGetAction describes an action based on HTTP Get requests.", - Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path to access on the HTTP server.", - Type: []string{"string"}, - Format: "", - }, - }, - "port": { - SchemaProps: spec.SchemaProps{ - Description: "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "host": { - SchemaProps: spec.SchemaProps{ - Description: "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", - Type: []string{"string"}, - Format: "", - }, - }, - "scheme": { - SchemaProps: spec.SchemaProps{ - Description: "Scheme to use for connecting to the host. Defaults to HTTP.", - Type: []string{"string"}, - Format: "", - }, - }, - "httpHeaders": { - SchemaProps: spec.SchemaProps{ - Description: "Custom headers to set in the request. 
HTTP allows repeated headers.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.HTTPHeader"), - }, - }, - }, - }, - }, - }, - Required: []string{"port"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.HTTPHeader", "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_k8sio_api_core_v1_HTTPHeader(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HTTPHeader describes a custom header to be used in HTTP probes", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "The header field name", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "The header field value", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "value"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Handler(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Handler defines a specific action that should be taken", - Properties: map[string]spec.Schema{ - "exec": { - SchemaProps: spec.SchemaProps{ - Description: "One and only one of the following should be specified. Exec specifies the action to take.", - Ref: ref("k8s.io/api/core/v1.ExecAction"), - }, - }, - "httpGet": { - SchemaProps: spec.SchemaProps{ - Description: "HTTPGet specifies the http request to perform.", - Ref: ref("k8s.io/api/core/v1.HTTPGetAction"), - }, - }, - "tcpSocket": { - SchemaProps: spec.SchemaProps{ - Description: "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", - Ref: ref("k8s.io/api/core/v1.TCPSocketAction"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.TCPSocketAction"}, - } -} - -func schema_k8sio_api_core_v1_HostAlias(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", - Properties: map[string]spec.Schema{ - "ip": { - SchemaProps: spec.SchemaProps{ - Description: "IP address of the host file entry.", - Type: []string{"string"}, - Format: "", - }, - }, - "hostnames": { - SchemaProps: spec.SchemaProps{ - Description: "Hostnames for the above IP address.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_HostPathVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", - Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"path"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ISCSIPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "targetPortal": { - SchemaProps: spec.SchemaProps{ - Description: "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - Type: []string{"string"}, - Format: "", - }, - }, - "iqn": { - SchemaProps: spec.SchemaProps{ - Description: "Target iSCSI Qualified Name.", - Type: []string{"string"}, - Format: "", - }, - }, - "lun": { - SchemaProps: spec.SchemaProps{ - Description: "iSCSI Target Lun number.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "iscsiInterface": { - SchemaProps: spec.SchemaProps{ - Description: "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "portals": { - SchemaProps: spec.SchemaProps{ - Description: "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "chapAuthDiscovery": { - SchemaProps: spec.SchemaProps{ - Description: "whether support iSCSI Discovery CHAP authentication", - Type: []string{"boolean"}, - Format: "", - }, - }, - "chapAuthSession": { - SchemaProps: spec.SchemaProps{ - Description: "whether support iSCSI Session CHAP authentication", - Type: []string{"boolean"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "CHAP Secret for iSCSI target and initiator authentication", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - "initiatorName": { - SchemaProps: spec.SchemaProps{ - Description: "Custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"targetPortal", "iqn", "lun"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretReference"}, - } -} - -func schema_k8sio_api_core_v1_ISCSIVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "targetPortal": { - SchemaProps: spec.SchemaProps{ - Description: "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - Type: []string{"string"}, - Format: "", - }, - }, - "iqn": { - SchemaProps: spec.SchemaProps{ - Description: "Target iSCSI Qualified Name.", - Type: []string{"string"}, - Format: "", - }, - }, - "lun": { - SchemaProps: spec.SchemaProps{ - Description: "iSCSI Target Lun number.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "iscsiInterface": { - SchemaProps: spec.SchemaProps{ - Description: "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "portals": { - SchemaProps: spec.SchemaProps{ - Description: "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "chapAuthDiscovery": { - SchemaProps: spec.SchemaProps{ - Description: "whether support iSCSI Discovery CHAP authentication", - Type: []string{"boolean"}, - Format: "", - }, - }, - "chapAuthSession": { - SchemaProps: spec.SchemaProps{ - Description: "whether support iSCSI Session CHAP authentication", - Type: []string{"boolean"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "CHAP Secret for iSCSI target and initiator authentication", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - "initiatorName": { - SchemaProps: spec.SchemaProps{ - Description: "Custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"targetPortal", "iqn", "lun"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_KeyToPath(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Maps a string key to a path within a volume.", - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The key to project.", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", - Type: []string{"string"}, - Format: "", - }, - }, - "mode": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"key", "path"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Lifecycle(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", - Properties: map[string]spec.Schema{ - "postStart": { - SchemaProps: spec.SchemaProps{ - Description: "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - Ref: ref("k8s.io/api/core/v1.Handler"), - }, - }, - "preStop": { - SchemaProps: spec.SchemaProps{ - Description: "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - Ref: ref("k8s.io/api/core/v1.Handler"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Handler"}, - } -} - -func schema_k8sio_api_core_v1_LimitRange(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.LimitRangeSpec"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LimitRangeSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_LimitRangeItem(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type of resource that this limit applies to.", - Type: []string{"string"}, - Format: "", - }, - }, - "max": { - SchemaProps: spec.SchemaProps{ - Description: "Max usage constraints on this kind by resource name.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "min": { - SchemaProps: spec.SchemaProps{ - Description: "Min usage constraints on this kind by resource name.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "default": { - SchemaProps: spec.SchemaProps{ - Description: "Default resource requirement limit value by resource name if resource limit is omitted.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "defaultRequest": { - SchemaProps: 
spec.SchemaProps{ - Description: "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "maxLimitRequestRatio": { - SchemaProps: spec.SchemaProps{ - Description: "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_LimitRangeList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LimitRangeList is a list of LimitRange items.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is a list of LimitRange objects. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LimitRange"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LimitRange", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_LimitRangeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", - Properties: map[string]spec.Schema{ - "limits": { - SchemaProps: spec.SchemaProps{ - Description: "Limits is the list of LimitRangeItem objects that are enforced.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LimitRangeItem"), - }, - }, - }, - }, - }, - }, - Required: []string{"limits"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LimitRangeItem"}, - } -} - -func schema_k8sio_api_core_v1_List(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "List holds a list of objects, which may not be known by the server.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of objects", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, - } -} - -func schema_k8sio_api_core_v1_LoadBalancerIngress(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", - Properties: map[string]spec.Schema{ - "ip": { - SchemaProps: spec.SchemaProps{ - Description: "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", - Type: []string{"string"}, - Format: "", - }, - }, - "hostname": { - SchemaProps: spec.SchemaProps{ - Description: "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_LoadBalancerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LoadBalancerStatus represents the status of a load-balancer.", - Properties: map[string]spec.Schema{ - "ingress": { - SchemaProps: spec.SchemaProps{ - Description: "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LoadBalancerIngress"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LoadBalancerIngress"}, - } -} - -func schema_k8sio_api_core_v1_LocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_LocalVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Local represents directly-attached storage with node affinity (Beta feature)", - Properties: map[string]spec.Schema{ - "path": { - SchemaProps: spec.SchemaProps{ - Description: "The full path to the volume on the node. 
It can be either a directory or block device (disk, partition, ...).", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a fileystem if unspecified.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"path"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_NFSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", - Properties: map[string]spec.Schema{ - "server": { - SchemaProps: spec.SchemaProps{ - Description: "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"server", "path"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Namespace(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.NamespaceSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status describes the current status of a Namespace. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.NamespaceStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NamespaceSpec", "k8s.io/api/core/v1.NamespaceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_NamespaceList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NamespaceList is a list of Namespaces.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Namespace"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Namespace", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_NamespaceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NamespaceSpec describes the attributes on a Namespace.", - Properties: map[string]spec.Schema{ - "finalizers": { - SchemaProps: spec.SchemaProps{ - Description: "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_NamespaceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NamespaceStatus is information about the current status of a Namespace.", - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase is the current lifecycle phase of the namespace. 
More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Node(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.NodeSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.NodeStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeSpec", "k8s.io/api/core/v1.NodeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_NodeAddress(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeAddress contains information for the node's address.", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Node address type, one of Hostname, ExternalIP or InternalIP.", - Type: []string{"string"}, - Format: "", - }, - }, - "address": { - SchemaProps: spec.SchemaProps{ - Description: "The node address.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "address"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_NodeAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Node affinity is a group of node affinity scheduling rules.", - Properties: map[string]spec.Schema{ - "requiredDuringSchedulingIgnoredDuringExecution": { - SchemaProps: spec.SchemaProps{ - Description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", - Ref: ref("k8s.io/api/core/v1.NodeSelector"), - }, - }, - "preferredDuringSchedulingIgnoredDuringExecution": { - SchemaProps: spec.SchemaProps{ - Description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PreferredSchedulingTerm"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelector", "k8s.io/api/core/v1.PreferredSchedulingTerm"}, - } -} - -func schema_k8sio_api_core_v1_NodeCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeCondition contains condition information for a node.", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type of node condition.", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status of the condition, one of True, False, Unknown.", - Type: []string{"string"}, - Format: "", - }, - }, - "lastHeartbeatTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time we got an update on a given condition.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "lastTransitionTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time the condition transit from one status to another.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "(brief) reason for the condition's last transition.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Human readable message indicating details about last transition.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "status"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_NodeConfigSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeConfigSource specifies a source of node configuration. 
Exactly one subfield (excluding metadata) must be non-nil.", - Properties: map[string]spec.Schema{ - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMap is a reference to a Node's ConfigMap", - Ref: ref("k8s.io/api/core/v1.ConfigMapNodeConfigSource"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapNodeConfigSource"}, - } -} - -func schema_k8sio_api_core_v1_NodeConfigStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.", - Properties: map[string]spec.Schema{ - "assigned": { - SchemaProps: spec.SchemaProps{ - Description: "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.", - Ref: ref("k8s.io/api/core/v1.NodeConfigSource"), - }, - }, - "active": { - SchemaProps: spec.SchemaProps{ - Description: "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.", - Ref: ref("k8s.io/api/core/v1.NodeConfigSource"), - }, - }, - "lastKnownGood": { - SchemaProps: spec.SchemaProps{ - Description: "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.", - Ref: ref("k8s.io/api/core/v1.NodeConfigSource"), - }, - }, - "error": { - SchemaProps: spec.SchemaProps{ - Description: "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. 
In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeConfigSource"}, - } -} - -func schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", - Properties: map[string]spec.Schema{ - "kubeletEndpoint": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoint on which Kubelet is listening.", - Ref: ref("k8s.io/api/core/v1.DaemonEndpoint"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.DaemonEndpoint"}, - } -} - -func schema_k8sio_api_core_v1_NodeList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeList is the whole list of all Nodes which have been registered with master.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of nodes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Node"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Node", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_NodeProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeProxyOptions is the query options to a Node's proxy call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the URL path to use for the current proxy request to node.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_NodeResources(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.", - Properties: map[string]spec.Schema{ - "Capacity": { - SchemaProps: spec.SchemaProps{ - Description: "Capacity represents the available resources of a node", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - }, - Required: []string{"Capacity"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_NodeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", - Properties: map[string]spec.Schema{ - "nodeSelectorTerms": { - SchemaProps: spec.SchemaProps{ - Description: "Required. A list of node selector terms. The terms are ORed.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.NodeSelectorTerm"), - }, - }, - }, - }, - }, - }, - Required: []string{"nodeSelectorTerms"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelectorTerm"}, - } -} - -func schema_k8sio_api_core_v1_NodeSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The label key that the selector applies to.", - Type: []string{"string"}, - Format: "", - }, - }, - "operator": { - SchemaProps: spec.SchemaProps{ - Description: "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", - Type: []string{"string"}, - Format: "", - }, - }, - "values": { - SchemaProps: spec.SchemaProps{ - Description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"key", "operator"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_NodeSelectorTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", - Properties: map[string]spec.Schema{ - "matchExpressions": { - SchemaProps: spec.SchemaProps{ - Description: "A list of node selector requirements by node's labels.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.NodeSelectorRequirement"), - }, - }, - }, - }, - }, - "matchFields": { - SchemaProps: spec.SchemaProps{ - Description: "A list of node selector requirements by node's fields.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.NodeSelectorRequirement"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelectorRequirement"}, - } -} - -func schema_k8sio_api_core_v1_NodeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeSpec describes the attributes that a node is created with.", - Properties: map[string]spec.Schema{ - "podCIDR": { - SchemaProps: spec.SchemaProps{ - Description: "PodCIDR represents the pod IP range assigned to the node.", - Type: []string{"string"}, - Format: "", - }, - }, - "providerID": { - SchemaProps: spec.SchemaProps{ - Description: "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>", - Type: []string{"string"}, - Format: "", - }, - }, - "unschedulable": { - SchemaProps: spec.SchemaProps{ - Description: "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", - Type: []string{"boolean"}, - Format: "", - }, - }, - "taints": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the node's taints.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Taint"), - }, - }, - }, - }, - }, - "configSource": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field", - Ref: ref("k8s.io/api/core/v1.NodeConfigSource"), - }, - }, - "externalID": { - SchemaProps: spec.SchemaProps{ - Description: "Deprecated. Not all kubelets will set this field. Remove field after 1.13. 
see: https://issues.k8s.io/61966", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeConfigSource", "k8s.io/api/core/v1.Taint"}, - } -} - -func schema_k8sio_api_core_v1_NodeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeStatus is information about the current status of a node.", - Properties: map[string]spec.Schema{ - "capacity": { - SchemaProps: spec.SchemaProps{ - Description: "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "allocatable": { - SchemaProps: spec.SchemaProps{ - Description: "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", - Type: []string{"string"}, - Format: "", - }, - }, - "conditions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.NodeCondition"), - }, - }, - }, - }, - }, - "addresses": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.NodeAddress"), - }, - }, - }, - }, - }, - "daemonEndpoints": { - SchemaProps: spec.SchemaProps{ - Description: "Endpoints of daemons running on the Node.", - Ref: ref("k8s.io/api/core/v1.NodeDaemonEndpoints"), - }, - }, - "nodeInfo": { - SchemaProps: spec.SchemaProps{ - Description: "Set of ids/uuids to uniquely identify the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#info", - Ref: ref("k8s.io/api/core/v1.NodeSystemInfo"), - }, - }, - "images": { - SchemaProps: spec.SchemaProps{ - Description: "List of container images on this node", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ContainerImage"), - }, - }, - }, - }, - }, - "volumesInUse": { - SchemaProps: spec.SchemaProps{ - Description: "List of attachable volumes in use (mounted) by the node.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "volumesAttached": { - SchemaProps: spec.SchemaProps{ - Description: "List of volumes that are attached to the node.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.AttachedVolume"), - }, - }, - }, - }, - }, - "config": { - SchemaProps: spec.SchemaProps{ - Description: "Status of the config assigned to the node via the dynamic Kubelet config feature.", - Ref: ref("k8s.io/api/core/v1.NodeConfigStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.AttachedVolume", "k8s.io/api/core/v1.ContainerImage", "k8s.io/api/core/v1.NodeAddress", "k8s.io/api/core/v1.NodeCondition", "k8s.io/api/core/v1.NodeConfigStatus", "k8s.io/api/core/v1.NodeDaemonEndpoints", "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_NodeSystemInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", - Properties: map[string]spec.Schema{ - "machineID": { - SchemaProps: spec.SchemaProps{ - Description: "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", - Type: []string{"string"}, - Format: "", - }, - }, - "systemUUID": { - SchemaProps: spec.SchemaProps{ - Description: "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html", - Type: []string{"string"}, - Format: "", - }, - }, - "bootID": { - SchemaProps: spec.SchemaProps{ - Description: "Boot ID reported by the node.", - Type: []string{"string"}, - Format: "", - }, - }, - "kernelVersion": { - SchemaProps: spec.SchemaProps{ - Description: "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", - Type: []string{"string"}, - Format: "", - }, - }, - "osImage": { - SchemaProps: spec.SchemaProps{ - Description: "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", - Type: []string{"string"}, - Format: "", - }, - }, - "containerRuntimeVersion": { - SchemaProps: spec.SchemaProps{ - Description: "ContainerRuntime Version reported by the node through runtime remote API (e.g. 
docker://1.5.0).", - Type: []string{"string"}, - Format: "", - }, - }, - "kubeletVersion": { - SchemaProps: spec.SchemaProps{ - Description: "Kubelet Version reported by the node.", - Type: []string{"string"}, - Format: "", - }, - }, - "kubeProxyVersion": { - SchemaProps: spec.SchemaProps{ - Description: "KubeProxy Version reported by the node.", - Type: []string{"string"}, - Format: "", - }, - }, - "operatingSystem": { - SchemaProps: spec.SchemaProps{ - Description: "The Operating System reported by the node", - Type: []string{"string"}, - Format: "", - }, - }, - "architecture": { - SchemaProps: spec.SchemaProps{ - Description: "The Architecture reported by the node", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"machineID", "systemUUID", "bootID", "kernelVersion", "osImage", "containerRuntimeVersion", "kubeletVersion", "kubeProxyVersion", "operatingSystem", "architecture"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ObjectFieldSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ObjectFieldSelector selects an APIVersioned field of an object.", - Properties: map[string]spec.Schema{ - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", - Type: []string{"string"}, - Format: "", - }, - }, - "fieldPath": { - SchemaProps: spec.SchemaProps{ - Description: "Path of the field to select in the specified API version.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"fieldPath"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ObjectReference contains enough information to let you inspect or modify the referred object.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "uid": { - SchemaProps: spec.SchemaProps{ - Description: "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "API version of the referent.", - Type: []string{"string"}, - Format: "", - }, - }, - "resourceVersion": { - SchemaProps: spec.SchemaProps{ - Description: "Specific resourceVersion to which this reference is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", - Type: []string{"string"}, - Format: "", - }, - }, - "fieldPath": { - SchemaProps: spec.SchemaProps{ - Description: "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolume(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", - Ref: ref("k8s.io/api/core/v1.PersistentVolumeSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", - Ref: ref("k8s.io/api/core/v1.PersistentVolumeStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PersistentVolumeSpec", "k8s.io/api/core/v1.PersistentVolumeStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeClaim(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status represents the current information/status of a persistent volume claim. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PersistentVolumeClaimSpec", "k8s.io/api/core/v1.PersistentVolumeClaimStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeClaimCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaimCondition contails details about state of pvc", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "lastProbeTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time we probed the condition.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "lastTransitionTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time the condition transitioned from one status to another.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Human-readable message indicating details about last transition.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "status"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeClaimList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "A list of persistent volume claims. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeClaimSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - Properties: map[string]spec.Schema{ - "accessModes": { - SchemaProps: spec.SchemaProps{ - Description: "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "selector": { - SchemaProps: spec.SchemaProps{ - Description: "A label query over volumes to consider for binding.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), - }, - }, - "volumeName": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeName is the binding reference to the PersistentVolume backing this claim.", - Type: []string{"string"}, - Format: "", - }, - }, - "storageClassName": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - Type: []string{"string"}, - Format: "", - }, - }, - "volumeMode": { - SchemaProps: spec.SchemaProps{ - Description: "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.", - Type: []string{"string"}, - Format: "", - }, - }, - "dataSource": { - SchemaProps: spec.SchemaProps{ - Description: "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. 
In the future, we plan to support more data source types and the behavior of the provisioner may change.", - Ref: ref("k8s.io/api/core/v1.TypedLocalObjectReference"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.TypedLocalObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeClaimStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase represents the current phase of PersistentVolumeClaim.", - Type: []string{"string"}, - Format: "", - }, - }, - "accessModes": { - SchemaProps: spec.SchemaProps{ - Description: "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "capacity": { - SchemaProps: spec.SchemaProps{ - Description: "Represents the actual resources of the underlying volume.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "conditions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimCondition"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PersistentVolumeClaimCondition", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeClaimVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", - Properties: map[string]spec.Schema{ - "claimName": { - SchemaProps: spec.SchemaProps{ - Description: "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Will force the ReadOnly setting in VolumeMounts. 
Default false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"claimName"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeList is a list of PersistentVolume items.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PersistentVolume"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PersistentVolume", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", - Properties: map[string]spec.Schema{ - "gcePersistentDisk": { - SchemaProps: spec.SchemaProps{ - Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), - }, - }, - "awsElasticBlockStore": { - SchemaProps: spec.SchemaProps{ - Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), - }, - }, - "hostPath": { - SchemaProps: spec.SchemaProps{ - Description: "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), - }, - }, - "glusterfs": { - SchemaProps: spec.SchemaProps{ - Description: "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - Ref: ref("k8s.io/api/core/v1.GlusterfsPersistentVolumeSource"), - }, - }, - "nfs": { - SchemaProps: spec.SchemaProps{ - Description: "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), - }, - }, - "rbd": { - SchemaProps: spec.SchemaProps{ - Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - Ref: ref("k8s.io/api/core/v1.RBDPersistentVolumeSource"), - }, - }, - "iscsi": { - SchemaProps: spec.SchemaProps{ - Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - Ref: ref("k8s.io/api/core/v1.ISCSIPersistentVolumeSource"), - }, - }, - "cinder": { - SchemaProps: spec.SchemaProps{ - Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Ref: ref("k8s.io/api/core/v1.CinderPersistentVolumeSource"), - }, - }, - "cephfs": { - SchemaProps: spec.SchemaProps{ - Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - Ref: ref("k8s.io/api/core/v1.CephFSPersistentVolumeSource"), - }, - }, - "fc": { - SchemaProps: spec.SchemaProps{ - Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), - }, - }, - "flocker": { - SchemaProps: spec.SchemaProps{ - Description: "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running", - Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), - }, - }, - "flexVolume": { - SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - Ref: ref("k8s.io/api/core/v1.FlexPersistentVolumeSource"), - }, - }, - "azureFile": { - SchemaProps: spec.SchemaProps{ - Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - Ref: ref("k8s.io/api/core/v1.AzureFilePersistentVolumeSource"), - }, - }, - "vsphereVolume": { - SchemaProps: spec.SchemaProps{ - Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), - }, - }, - "quobyte": { - SchemaProps: spec.SchemaProps{ - Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), - }, - }, - "azureDisk": { - SchemaProps: spec.SchemaProps{ - Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), - }, - }, - "photonPersistentDisk": { - SchemaProps: spec.SchemaProps{ - Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), - }, - }, - "portworxVolume": { - SchemaProps: spec.SchemaProps{ - Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), - }, - }, - "scaleIO": { - SchemaProps: spec.SchemaProps{ - Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - Ref: ref("k8s.io/api/core/v1.ScaleIOPersistentVolumeSource"), - }, - }, - "local": { - SchemaProps: spec.SchemaProps{ - Description: "Local represents directly-attached storage with node affinity", - Ref: ref("k8s.io/api/core/v1.LocalVolumeSource"), - }, - }, - "storageos": { - SchemaProps: spec.SchemaProps{ - Description: "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", - Ref: ref("k8s.io/api/core/v1.StorageOSPersistentVolumeSource"), - }, - }, - "csi": { - SchemaProps: spec.SchemaProps{ - Description: "CSI represents storage that handled by an external CSI driver (Beta feature).", - Ref: ref("k8s.io/api/core/v1.CSIPersistentVolumeSource"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CSIPersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderPersistentVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexPersistentVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIPersistentVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", 
"k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDPersistentVolumeSource", "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeSpec is the specification of a persistent volume.", - Properties: map[string]spec.Schema{ - "capacity": { - SchemaProps: spec.SchemaProps{ - Description: "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "gcePersistentDisk": { - SchemaProps: spec.SchemaProps{ - Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), - }, - }, - "awsElasticBlockStore": { - SchemaProps: spec.SchemaProps{ - Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), - }, - }, - "hostPath": { - SchemaProps: spec.SchemaProps{ - Description: "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), - }, - }, - "glusterfs": { - SchemaProps: spec.SchemaProps{ - Description: "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - Ref: ref("k8s.io/api/core/v1.GlusterfsPersistentVolumeSource"), - }, - }, - "nfs": { - SchemaProps: spec.SchemaProps{ - Description: "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), - }, - }, - "rbd": { - SchemaProps: spec.SchemaProps{ - Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - Ref: ref("k8s.io/api/core/v1.RBDPersistentVolumeSource"), - }, - }, - "iscsi": { - SchemaProps: spec.SchemaProps{ - Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
Provisioned by an admin.", - Ref: ref("k8s.io/api/core/v1.ISCSIPersistentVolumeSource"), - }, - }, - "cinder": { - SchemaProps: spec.SchemaProps{ - Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Ref: ref("k8s.io/api/core/v1.CinderPersistentVolumeSource"), - }, - }, - "cephfs": { - SchemaProps: spec.SchemaProps{ - Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - Ref: ref("k8s.io/api/core/v1.CephFSPersistentVolumeSource"), - }, - }, - "fc": { - SchemaProps: spec.SchemaProps{ - Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), - }, - }, - "flocker": { - SchemaProps: spec.SchemaProps{ - Description: "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running", - Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), - }, - }, - "flexVolume": { - SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - Ref: ref("k8s.io/api/core/v1.FlexPersistentVolumeSource"), - }, - }, - "azureFile": { - SchemaProps: spec.SchemaProps{ - Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - Ref: ref("k8s.io/api/core/v1.AzureFilePersistentVolumeSource"), - }, - }, - "vsphereVolume": { - SchemaProps: spec.SchemaProps{ - Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), - }, - }, - "quobyte": { - SchemaProps: spec.SchemaProps{ - Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), - }, - }, - "azureDisk": { - SchemaProps: spec.SchemaProps{ - Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), - }, - }, - "photonPersistentDisk": { - SchemaProps: spec.SchemaProps{ - Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), - }, - }, - "portworxVolume": { - SchemaProps: spec.SchemaProps{ - Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), - }, - }, - "scaleIO": { - SchemaProps: spec.SchemaProps{ - Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - Ref: ref("k8s.io/api/core/v1.ScaleIOPersistentVolumeSource"), - }, - }, - "local": { - SchemaProps: spec.SchemaProps{ - Description: "Local represents directly-attached storage with node affinity", - Ref: ref("k8s.io/api/core/v1.LocalVolumeSource"), - }, - }, - "storageos": { - SchemaProps: spec.SchemaProps{ - Description: "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", - Ref: ref("k8s.io/api/core/v1.StorageOSPersistentVolumeSource"), - }, - }, - "csi": { - SchemaProps: 
spec.SchemaProps{ - Description: "CSI represents storage that handled by an external CSI driver (Beta feature).", - Ref: ref("k8s.io/api/core/v1.CSIPersistentVolumeSource"), - }, - }, - "accessModes": { - SchemaProps: spec.SchemaProps{ - Description: "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "claimRef": { - SchemaProps: spec.SchemaProps{ - Description: "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding", - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - "persistentVolumeReclaimPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", - Type: []string{"string"}, - Format: "", - }, - }, - "storageClassName": { - SchemaProps: spec.SchemaProps{ - Description: "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", - Type: []string{"string"}, - Format: "", - }, - }, - "mountOptions": { - SchemaProps: spec.SchemaProps{ - Description: "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "volumeMode": { - SchemaProps: spec.SchemaProps{ - Description: "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.", - Type: []string{"string"}, - Format: "", - }, - }, - "nodeAffinity": { - SchemaProps: spec.SchemaProps{ - Description: "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. 
This field influences the scheduling of pods that use this volume.", - Ref: ref("k8s.io/api/core/v1.VolumeNodeAffinity"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFilePersistentVolumeSource", "k8s.io/api/core/v1.CSIPersistentVolumeSource", "k8s.io/api/core/v1.CephFSPersistentVolumeSource", "k8s.io/api/core/v1.CinderPersistentVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexPersistentVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GlusterfsPersistentVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIPersistentVolumeSource", "k8s.io/api/core/v1.LocalVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.ObjectReference", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDPersistentVolumeSource", "k8s.io/api/core/v1.ScaleIOPersistentVolumeSource", "k8s.io/api/core/v1.StorageOSPersistentVolumeSource", "k8s.io/api/core/v1.VolumeNodeAffinity", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_PersistentVolumeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeStatus is the current status of a persistent volume.", - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human-readable message indicating details about why the volume is in this state.", - Type: []string{"string"}, - Format: "", - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PhotonPersistentDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Photon Controller persistent disk resource.", - Properties: map[string]spec.Schema{ - "pdID": { - SchemaProps: spec.SchemaProps{ - Description: "ID that identifies Photon Controller persistent disk", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"pdID"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Pod(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.PodSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.PodStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodSpec", "k8s.io/api/core/v1.PodStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_PodAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Pod affinity is a group of inter pod affinity scheduling rules.", - Properties: map[string]spec.Schema{ - "requiredDuringSchedulingIgnoredDuringExecution": { - SchemaProps: spec.SchemaProps{ - Description: "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"), - }, - }, - }, - }, - }, - "preferredDuringSchedulingIgnoredDuringExecution": { - SchemaProps: spec.SchemaProps{ - Description: "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.WeightedPodAffinityTerm"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodAffinityTerm", "k8s.io/api/core/v1.WeightedPodAffinityTerm"}, - } -} - -func schema_k8sio_api_core_v1_PodAffinityTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", - Properties: map[string]spec.Schema{ - "labelSelector": { - SchemaProps: spec.SchemaProps{ - Description: "A label query over a set of resources, in this case pods.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"), - }, - }, - "namespaces": { - SchemaProps: spec.SchemaProps{ - Description: "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "topologyKey": { - SchemaProps: spec.SchemaProps{ - Description: "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. 
Empty topologyKey is not allowed.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"topologyKey"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"}, - } -} - -func schema_k8sio_api_core_v1_PodAntiAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", - Properties: map[string]spec.Schema{ - "requiredDuringSchedulingIgnoredDuringExecution": { - SchemaProps: spec.SchemaProps{ - Description: "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"), - }, - }, - }, - }, - }, - "preferredDuringSchedulingIgnoredDuringExecution": { - SchemaProps: spec.SchemaProps{ - Description: "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.WeightedPodAffinityTerm"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodAffinityTerm", "k8s.io/api/core/v1.WeightedPodAffinityTerm"}, - } -} - -func schema_k8sio_api_core_v1_PodAttachOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodAttachOptions is the query options to a Pod's remote attach call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdout": { - SchemaProps: spec.SchemaProps{ - Description: "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stderr": { - SchemaProps: spec.SchemaProps{ - Description: "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "The container in which to execute the command. Defaults to only container if there is only one container in the pod.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PodCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodCondition contains details for the current condition of this pod.", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status is the status of the condition. Can be True, False, Unknown. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - Type: []string{"string"}, - Format: "", - }, - }, - "lastProbeTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time we probed the condition.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "lastTransitionTime": { - SchemaProps: spec.SchemaProps{ - Description: "Last time the condition transitioned from one status to another.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "Unique, one-word, CamelCase reason for the condition's last transition.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Human-readable message indicating details about last transition.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "status"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_PodDNSConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", - Properties: map[string]spec.Schema{ - "nameservers": { - SchemaProps: spec.SchemaProps{ - Description: "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "searches": { - SchemaProps: spec.SchemaProps{ - Description: "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "options": { - SchemaProps: spec.SchemaProps{ - Description: "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. 
Resolution options given in Options will override those that appear in the base DNSPolicy.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PodDNSConfigOption"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodDNSConfigOption"}, - } -} - -func schema_k8sio_api_core_v1_PodDNSConfigOption(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodDNSConfigOption defines DNS resolver options of a pod.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Required.", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PodExecOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodExecOptions is the query options to a Pod's remote exec call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "stdin": { - SchemaProps: spec.SchemaProps{ - Description: "Redirect the standard input stream of the pod for this call. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stdout": { - SchemaProps: spec.SchemaProps{ - Description: "Redirect the standard output stream of the pod for this call. Defaults to true.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "stderr": { - SchemaProps: spec.SchemaProps{ - Description: "Redirect the standard error stream of the pod for this call. Defaults to true.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tty": { - SchemaProps: spec.SchemaProps{ - Description: "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", - Type: []string{"string"}, - Format: "", - }, - }, - "command": { - SchemaProps: spec.SchemaProps{ - Description: "Command is the remote command to execute. argv array. 
Not executed within a shell.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"command"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PodList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodList is a list of Pods.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of pods. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Pod"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Pod", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_PodLogOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodLogOptions is the query options for a Pod's logs REST call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "container": { - SchemaProps: spec.SchemaProps{ - Description: "The container for which to stream logs. Defaults to only container if there is one container in the pod.", - Type: []string{"string"}, - Format: "", - }, - }, - "follow": { - SchemaProps: spec.SchemaProps{ - Description: "Follow the log stream of the pod. 
Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "previous": { - SchemaProps: spec.SchemaProps{ - Description: "Return previous terminated container logs. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "sinceSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "sinceTime": { - SchemaProps: spec.SchemaProps{ - Description: "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "timestamps": { - SchemaProps: spec.SchemaProps{ - Description: "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "tailLines": { - SchemaProps: spec.SchemaProps{ - Description: "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "limitBytes": { - SchemaProps: spec.SchemaProps{ - Description: "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_PodPortForwardOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "ports": { - SchemaProps: spec.SchemaProps{ - Description: "List of ports to forward Required when using WebSockets", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PodProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodProxyOptions is the query options to a Pod's proxy call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the URL path to use for the current proxy request to pod.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PodReadinessGate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodReadinessGate contains the reference to a pod condition", - Properties: map[string]spec.Schema{ - "conditionType": { - SchemaProps: spec.SchemaProps{ - Description: "ConditionType refers to a condition in the pod's condition list with matching type.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"conditionType"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PodSecurityContext(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - Properties: map[string]spec.Schema{ - "seLinuxOptions": { - SchemaProps: spec.SchemaProps{ - Description: "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - Ref: ref("k8s.io/api/core/v1.SELinuxOptions"), - }, - }, - "runAsUser": { - SchemaProps: spec.SchemaProps{ - Description: "The UID to run the entrypoint of the container process. 
Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "runAsGroup": { - SchemaProps: spec.SchemaProps{ - Description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "runAsNonRoot": { - SchemaProps: spec.SchemaProps{ - Description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "supplementalGroups": { - SchemaProps: spec.SchemaProps{ - Description: "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - }, - }, - "fsGroup": { - SchemaProps: spec.SchemaProps{ - Description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "sysctls": { - SchemaProps: spec.SchemaProps{ - Description: "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Sysctl"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SELinuxOptions", "k8s.io/api/core/v1.Sysctl"}, - } -} - -func schema_k8sio_api_core_v1_PodSignature(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Describes the class of pods that should avoid this node. 
Exactly one field should be set.", - Properties: map[string]spec.Schema{ - "podController": { - SchemaProps: spec.SchemaProps{ - Description: "Reference to controller whose pods should avoid this node.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"}, - } -} - -func schema_k8sio_api_core_v1_PodSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodSpec is a description of a pod.", - Properties: map[string]spec.Schema{ - "volumes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge,retainKeys", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Volume"), - }, - }, - }, - }, - }, - "initContainers": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Container"), - }, - }, - }, - }, - }, - "containers": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Container"), - }, - }, - }, - }, - }, - "restartPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", - Type: []string{"string"}, - Format: "", - }, - }, - "terminationGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod needs to terminate gracefully. 
May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "activeDeadlineSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "dnsPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", - Type: []string{"string"}, - Format: "", - }, - }, - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "serviceAccountName": { - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"string"}, - Format: "", - }, - }, - "serviceAccount": { - SchemaProps: spec.SchemaProps{ - Description: "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", - Type: []string{"string"}, - Format: "", - }, - }, - "automountServiceAccountToken": { - SchemaProps: spec.SchemaProps{ - Description: "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "nodeName": { - SchemaProps: spec.SchemaProps{ - Description: "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", - Type: []string{"string"}, - Format: "", - }, - }, - "hostNetwork": { - SchemaProps: spec.SchemaProps{ - Description: "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "hostPID": { - SchemaProps: spec.SchemaProps{ - Description: "Use the host's pid namespace. Optional: Default to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "hostIPC": { - SchemaProps: spec.SchemaProps{ - Description: "Use the host's ipc namespace. 
Optional: Default to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "shareProcessNamespace": { - SchemaProps: spec.SchemaProps{ - Description: "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "securityContext": { - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), - }, - }, - "imagePullSecrets": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - }, - }, - "hostname": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", - Type: []string{"string"}, - Format: "", - }, - }, - "subdomain": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", - Type: []string{"string"}, - Format: "", - }, - }, - "affinity": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's scheduling constraints", - Ref: ref("k8s.io/api/core/v1.Affinity"), - }, - }, - "schedulerName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - Type: []string{"string"}, - Format: "", - }, - }, - "tolerations": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the pod's tolerations.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Toleration"), - }, - }, - }, - }, - }, - "hostAliases": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "ip", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. 
This is only valid for non-hostNetwork pods.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.HostAlias"), - }, - }, - }, - }, - }, - "priorityClassName": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", - Type: []string{"string"}, - Format: "", - }, - }, - "priority": { - SchemaProps: spec.SchemaProps{ - Description: "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "dnsConfig": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), - }, - }, - "readinessGates": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PodReadinessGate"), - }, - }, - }, - }, - }, - "runtimeClassName": { - SchemaProps: spec.SchemaProps{ - Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", - Type: []string{"string"}, - Format: "", - }, - }, - "enableServiceLinks": { - SchemaProps: spec.SchemaProps{ - Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"containers"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodReadinessGate", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, - } -} - -func schema_k8sio_api_core_v1_PodStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - Properties: map[string]spec.Schema{ - "phase": { - SchemaProps: spec.SchemaProps{ - Description: "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - Type: []string{"string"}, - Format: "", - }, - }, - "conditions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PodCondition"), - }, - }, - }, - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human readable message indicating details about why the pod is in this condition.", - Type: []string{"string"}, - Format: "", - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "A brief CamelCase message indicating details about why the pod is in this state. e.g. 
'Evicted'", - Type: []string{"string"}, - Format: "", - }, - }, - "nominatedNodeName": { - SchemaProps: spec.SchemaProps{ - Description: "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - Type: []string{"string"}, - Format: "", - }, - }, - "hostIP": { - SchemaProps: spec.SchemaProps{ - Description: "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - Type: []string{"string"}, - Format: "", - }, - }, - "podIP": { - SchemaProps: spec.SchemaProps{ - Description: "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - Type: []string{"string"}, - Format: "", - }, - }, - "startTime": { - SchemaProps: spec.SchemaProps{ - Description: "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "initContainerStatuses": { - SchemaProps: spec.SchemaProps{ - Description: "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ContainerStatus"), - }, - }, - }, - }, - }, - "containerStatuses": { - SchemaProps: spec.SchemaProps{ - Description: "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ContainerStatus"), - }, - }, - }, - }, - }, - "qosClass": { - SchemaProps: spec.SchemaProps{ - Description: "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ContainerStatus", "k8s.io/api/core/v1.PodCondition", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_PodStatusResult(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.PodStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_PodTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodTemplate describes a template for creating copies of a predefined pod.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_PodTemplateList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodTemplateList is a list of PodTemplates.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of pod templates", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.PodTemplate"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodTemplate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_PodTemplateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodTemplateSpec describes the data a pod should have when created from a template", - Properties: map[string]spec.Schema{ - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.PodSpec"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_PortworxVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PortworxVolumeSource represents a Portworx volume resource.", - Properties: map[string]spec.Schema{ - "volumeID": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeID uniquely identifies a Portworx volume", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"volumeID"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_PreferAvoidPodsEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Describes a class of pods that should avoid this node.", - Properties: map[string]spec.Schema{ - "podSignature": { - SchemaProps: spec.SchemaProps{ - Description: "The class of pods.", - Ref: ref("k8s.io/api/core/v1.PodSignature"), - }, - }, - "evictionTime": { - SchemaProps: spec.SchemaProps{ - Description: "Time at which this entry was added to the list.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "(brief) reason why this entry was added to the list.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "Human readable message indicating why this entry was added to the list.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"podSignature"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodSignature", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_PreferredSchedulingTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", - Properties: map[string]spec.Schema{ - "weight": { - SchemaProps: spec.SchemaProps{ - Description: "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "preference": { - SchemaProps: spec.SchemaProps{ - Description: "A node selector term, associated with the corresponding weight.", - Ref: ref("k8s.io/api/core/v1.NodeSelectorTerm"), - }, - }, - }, - Required: []string{"weight", "preference"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelectorTerm"}, - } -} - -func schema_k8sio_api_core_v1_Probe(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", - Properties: map[string]spec.Schema{ - "exec": { - SchemaProps: spec.SchemaProps{ - Description: "One and only one of the following should be specified. Exec specifies the action to take.", - Ref: ref("k8s.io/api/core/v1.ExecAction"), - }, - }, - "httpGet": { - SchemaProps: spec.SchemaProps{ - Description: "HTTPGet specifies the http request to perform.", - Ref: ref("k8s.io/api/core/v1.HTTPGetAction"), - }, - }, - "tcpSocket": { - SchemaProps: spec.SchemaProps{ - Description: "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", - Ref: ref("k8s.io/api/core/v1.TCPSocketAction"), - }, - }, - "initialDelaySeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Number of seconds after the container has started before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "timeoutSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "periodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "successThreshold": { - SchemaProps: spec.SchemaProps{ - Description: "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "failureThreshold": { - SchemaProps: spec.SchemaProps{ - Description: "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ExecAction", "k8s.io/api/core/v1.HTTPGetAction", "k8s.io/api/core/v1.TCPSocketAction"}, - } -} - -func schema_k8sio_api_core_v1_ProjectedVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a projected volume source", - Properties: map[string]spec.Schema{ - "sources": { - SchemaProps: spec.SchemaProps{ - Description: "list of volume projections", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.VolumeProjection"), - }, - }, - }, - }, - }, - "defaultMode": { - SchemaProps: spec.SchemaProps{ - Description: "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"sources"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.VolumeProjection"}, - } -} - -func schema_k8sio_api_core_v1_QuobyteVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", - Properties: map[string]spec.Schema{ - "registry": { - SchemaProps: spec.SchemaProps{ - Description: "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", - Type: []string{"string"}, - Format: "", - }, - }, - "volume": { - SchemaProps: spec.SchemaProps{ - Description: "Volume is a string that references an already created Quobyte volume by name.", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "user": { - SchemaProps: spec.SchemaProps{ - Description: "User to map volume access to Defaults to serivceaccount user", - Type: []string{"string"}, - Format: "", - }, - }, - "group": { - SchemaProps: spec.SchemaProps{ - Description: "Group to map volume access to Default is no group", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"registry", "volume"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_RBDPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "monitors": { - SchemaProps: spec.SchemaProps{ - Description: "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - Type: []string{"string"}, - Format: "", - }, - }, - "pool": { - SchemaProps: spec.SchemaProps{ - Description: "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "user": { - SchemaProps: spec.SchemaProps{ - Description: "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "keyring": { - SchemaProps: spec.SchemaProps{ - Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"monitors", "image"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretReference"}, - } -} - -func schema_k8sio_api_core_v1_RBDVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "monitors": { - SchemaProps: spec.SchemaProps{ - Description: "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "image": { - SchemaProps: spec.SchemaProps{ - Description: "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - Type: []string{"string"}, - Format: "", - }, - }, - "pool": { - SchemaProps: spec.SchemaProps{ - Description: "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "user": { - SchemaProps: spec.SchemaProps{ - Description: "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "keyring": { - SchemaProps: spec.SchemaProps{ - Description: "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"string"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"monitors", "image"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_RangeAllocation(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RangeAllocation is not a public type.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "range": { - SchemaProps: spec.SchemaProps{ - Description: "Range is string that identifies the range represented by 'data'.", - Type: []string{"string"}, - Format: "", - }, - }, - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data is a bit array containing all allocated addresses in the previous segment.", - Type: []string{"string"}, - Format: "byte", - }, - }, - }, - Required: []string{"range", "data"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_ReplicationController(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ReplicationController represents the configuration of a replication controller.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.ReplicationControllerSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.ReplicationControllerStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ReplicationControllerSpec", "k8s.io/api/core/v1.ReplicationControllerStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_ReplicationControllerCondition(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ReplicationControllerCondition describes the state of a replication controller at a certain point.", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type of replication controller condition.", - Type: []string{"string"}, - Format: "", - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status of the condition, one of True, False, Unknown.", - Type: []string{"string"}, - Format: "", - }, - }, - "lastTransitionTime": { - SchemaProps: spec.SchemaProps{ - Description: "The last time the condition transitioned from one status to another.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "The reason for the condition's last transition.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human readable message indicating details about the transition.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"type", "status"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_ReplicationControllerList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ReplicationControllerList is a collection of replication controllers.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ReplicationController"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ReplicationController", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_ReplicationControllerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ReplicationControllerSpec is the specification of a replication controller.", - Properties: map[string]spec.Schema{ - "replicas": { - SchemaProps: spec.SchemaProps{ - Description: "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "minReadySeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "selector": { - SchemaProps: spec.SchemaProps{ - Description: "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "template": { - SchemaProps: spec.SchemaProps{ - Description: "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", - Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodTemplateSpec"}, - } -} - -func schema_k8sio_api_core_v1_ReplicationControllerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ReplicationControllerStatus represents the current status of a replication controller.", - Properties: map[string]spec.Schema{ - "replicas": { - SchemaProps: spec.SchemaProps{ - Description: "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "fullyLabeledReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "The number of pods that have labels matching the labels of the pod template of the replication controller.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "readyReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "The number of ready replicas for this replication controller.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "availableReplicas": { - SchemaProps: spec.SchemaProps{ - Description: "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "observedGeneration": { - SchemaProps: spec.SchemaProps{ - Description: "ObservedGeneration reflects the generation of the most recently observed replication controller.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "conditions": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "type", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Represents the latest available observations of a replication controller's current state.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ReplicationControllerCondition"), - }, - }, - }, - }, - }, - }, - Required: []string{"replicas"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ReplicationControllerCondition"}, - } -} - -func schema_k8sio_api_core_v1_ResourceFieldSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceFieldSelector represents container resources (cpu, memory) and their output format", - Properties: map[string]spec.Schema{ - "containerName": { - SchemaProps: spec.SchemaProps{ - Description: "Container name: required for volumes, optional for env vars", - Type: []string{"string"}, - Format: "", - }, - }, - "resource": { - SchemaProps: spec.SchemaProps{ - Description: "Required: resource to select", - Type: []string{"string"}, - Format: "", - }, - }, - "divisor": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the output format of the exposed resources, defaults to \"1\"", - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - Required: []string{"resource"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func 
schema_k8sio_api_core_v1_ResourceQuota(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceQuota sets aggregate quota restrictions enforced per namespace", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.ResourceQuotaSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.ResourceQuotaStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ResourceQuotaSpec", "k8s.io/api/core/v1.ResourceQuotaStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_ResourceQuotaList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceQuotaList is a list of ResourceQuota items.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is a list of ResourceQuota objects. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ResourceQuota"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ResourceQuota", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_ResourceQuotaSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", - Properties: map[string]spec.Schema{ - "hard": { - SchemaProps: spec.SchemaProps{ - Description: "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "scopes": { - SchemaProps: spec.SchemaProps{ - Description: "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "scopeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.", - Ref: ref("k8s.io/api/core/v1.ScopeSelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ScopeSelector", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_ResourceQuotaStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceQuotaStatus defines the enforced hard limits and observed use.", - Properties: map[string]spec.Schema{ - "hard": { - SchemaProps: spec.SchemaProps{ - Description: "Hard is the set of enforced hard limits for each named resource. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "used": { - SchemaProps: spec.SchemaProps{ - Description: "Used is the current observed total usage of the resource in the namespace.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_ResourceRequirements(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceRequirements describes the compute resource requirements.", - Properties: map[string]spec.Schema{ - "limits": { - SchemaProps: spec.SchemaProps{ - Description: "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - "requests": { - SchemaProps: spec.SchemaProps{ - Description: "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/api/resource.Quantity"}, - } -} - -func schema_k8sio_api_core_v1_SELinuxOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SELinuxOptions are the labels to be applied to the container", - Properties: map[string]spec.Schema{ - "user": { - SchemaProps: spec.SchemaProps{ - Description: "User is a SELinux user label that applies to the container.", - Type: []string{"string"}, - Format: "", - }, - }, - "role": { - SchemaProps: spec.SchemaProps{ - Description: "Role is a SELinux role label that applies to the container.", - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Type is a SELinux type label that applies to the container.", - Type: []string{"string"}, - Format: "", - }, - }, - "level": { - SchemaProps: spec.SchemaProps{ - Description: "Level is SELinux level label that applies to the container.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ScaleIOPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", - Properties: map[string]spec.Schema{ - "gateway": { - SchemaProps: spec.SchemaProps{ - Description: "The host address of the ScaleIO API Gateway.", - Type: []string{"string"}, - Format: "", - }, - }, - "system": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the storage system as configured in ScaleIO.", - Type: []string{"string"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", - Ref: ref("k8s.io/api/core/v1.SecretReference"), - }, - }, - "sslEnabled": { - SchemaProps: spec.SchemaProps{ - Description: "Flag to enable/disable SSL communication with Gateway, default false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "protectionDomain": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the ScaleIO Protection Domain for the configured storage.", - Type: []string{"string"}, - Format: "", - }, - }, - "storagePool": { - SchemaProps: spec.SchemaProps{ - Description: "The ScaleIO Storage Pool associated with the protection domain.", - Type: []string{"string"}, - Format: "", - }, - }, - "storageMode": { - SchemaProps: spec.SchemaProps{ - Description: "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", - Type: []string{"string"}, - Format: "", - }, - }, - "volumeName": { - SchemaProps: spec.SchemaProps{ - Description: "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"gateway", "system", "secretRef"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretReference"}, - } -} - -func schema_k8sio_api_core_v1_ScaleIOVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ScaleIOVolumeSource represents a persistent ScaleIO volume", - Properties: map[string]spec.Schema{ - "gateway": { - SchemaProps: spec.SchemaProps{ - Description: "The host address of the ScaleIO API Gateway.", - Type: []string{"string"}, - Format: "", - }, - }, - "system": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the storage system as configured in ScaleIO.", - Type: []string{"string"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - "sslEnabled": { - SchemaProps: spec.SchemaProps{ - Description: "Flag to enable/disable SSL communication with Gateway, default false", - Type: []string{"boolean"}, - Format: "", - }, - }, - "protectionDomain": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the ScaleIO Protection Domain for the configured storage.", - Type: []string{"string"}, - Format: "", - }, - }, - "storagePool": { - SchemaProps: spec.SchemaProps{ - Description: "The ScaleIO Storage Pool associated with the protection domain.", - Type: []string{"string"}, - Format: "", - }, - }, - "storageMode": { - SchemaProps: spec.SchemaProps{ - Description: "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", - Type: []string{"string"}, - Format: "", - }, - }, - "volumeName": { - SchemaProps: spec.SchemaProps{ - Description: "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"gateway", "system", "secretRef"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_ScopeSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.", - Properties: map[string]spec.Schema{ - "matchExpressions": { - SchemaProps: spec.SchemaProps{ - Description: "A list of scope selector requirements by scope of the resources.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ScopedResourceSelectorRequirement"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ScopedResourceSelectorRequirement"}, - } -} - -func schema_k8sio_api_core_v1_ScopedResourceSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.", - Properties: map[string]spec.Schema{ - "scopeName": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the scope that the selector applies to.", - Type: []string{"string"}, - Format: "", - }, - }, - "operator": { - SchemaProps: spec.SchemaProps{ - Description: "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.", - Type: []string{"string"}, - Format: "", - }, - }, - "values": { - SchemaProps: spec.SchemaProps{ - Description: "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"scopeName", "operator"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Secret(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "data": { - SchemaProps: spec.SchemaProps{ - Description: "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "byte", - }, - }, - }, - }, - }, - "stringData": { - SchemaProps: spec.SchemaProps{ - Description: "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "Used to facilitate programmatic handling of secret data.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_SecretEnvSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Specify whether the Secret must be defined", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_SecretKeySelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SecretKeySelector selects a key of a Secret.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The key of the secret to select from. 
Must be a valid secret key.", - Type: []string{"string"}, - Format: "", - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Specify whether the Secret or it's key must be defined", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"key"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_SecretList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SecretList is a list of Secret.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Secret"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Secret", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_SecretProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.KeyToPath"), - }, - }, - }, - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Specify whether the Secret or its key must be defined", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.KeyToPath"}, - } -} - -func schema_k8sio_api_core_v1_SecretReference(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is unique within a namespace to reference a secret resource.", - Type: []string{"string"}, - Format: "", - }, - }, - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "Namespace defines the space within which the secret name must be unique.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_SecretVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", - Properties: map[string]spec.Schema{ - "secretName": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", - Type: []string{"string"}, - Format: "", - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.KeyToPath"), - }, - }, - }, - }, - }, - "defaultMode": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "optional": { - SchemaProps: spec.SchemaProps{ - Description: "Specify whether the Secret or it's keys must be defined", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.KeyToPath"}, - } -} - -func schema_k8sio_api_core_v1_SecurityContext(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", - Properties: map[string]spec.Schema{ - "capabilities": { - SchemaProps: spec.SchemaProps{ - Description: "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", - Ref: ref("k8s.io/api/core/v1.Capabilities"), - }, - }, - "privileged": { - SchemaProps: spec.SchemaProps{ - Description: "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "seLinuxOptions": { - SchemaProps: spec.SchemaProps{ - Description: "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - Ref: ref("k8s.io/api/core/v1.SELinuxOptions"), - }, - }, - "runAsUser": { - SchemaProps: spec.SchemaProps{ - Description: "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "runAsGroup": { - SchemaProps: spec.SchemaProps{ - Description: "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "runAsNonRoot": { - SchemaProps: spec.SchemaProps{ - Description: "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "readOnlyRootFilesystem": { - SchemaProps: spec.SchemaProps{ - Description: "Whether this container has a read-only root filesystem. 
Default is false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "allowPrivilegeEscalation": { - SchemaProps: spec.SchemaProps{ - Description: "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN", - Type: []string{"boolean"}, - Format: "", - }, - }, - "procMount": { - SchemaProps: spec.SchemaProps{ - Description: "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Capabilities", "k8s.io/api/core/v1.SELinuxOptions"}, - } -} - -func schema_k8sio_api_core_v1_SerializedReference(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SerializedReference is a reference to serialized object.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "reference": { - SchemaProps: spec.SchemaProps{ - Description: "The reference to an object in the system.", - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_Service(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.ServiceSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Ref: ref("k8s.io/api/core/v1.ServiceStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ServiceSpec", "k8s.io/api/core/v1.ServiceStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_ServiceAccount(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "secrets": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - }, - }, - }, - "imagePullSecrets": { - SchemaProps: spec.SchemaProps{ - Description: "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - }, - }, - "automountServiceAccountToken": { - SchemaProps: spec.SchemaProps{ - Description: "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_core_v1_ServiceAccountList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountList is a list of ServiceAccount objects", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ServiceAccount"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ServiceAccount", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_ServiceAccountTokenProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", - Properties: map[string]spec.Schema{ - "audience": { - SchemaProps: spec.SchemaProps{ - Description: "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. 
The audience defaults to the identifier of the apiserver.", - Type: []string{"string"}, - Format: "", - }, - }, - "expirationSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the path relative to the mount point of the file to project the token into.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"path"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ServiceList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceList holds a list of services.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of services", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Service"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.Service", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_core_v1_ServicePort(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServicePort contains information on service's port.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", - Type: []string{"string"}, - Format: "", - }, - }, - "protocol": { - SchemaProps: spec.SchemaProps{ - Description: "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". 
Default is TCP.", - Type: []string{"string"}, - Format: "", - }, - }, - "port": { - SchemaProps: spec.SchemaProps{ - Description: "The port that will be exposed by this service.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "targetPort": { - SchemaProps: spec.SchemaProps{ - Description: "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "nodePort": { - SchemaProps: spec.SchemaProps{ - Description: "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"port"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_k8sio_api_core_v1_ServiceProxyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceProxyOptions is the query options to a Service's proxy call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "path": { - SchemaProps: spec.SchemaProps{ - Description: "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. 
Path is _search?q=user:kimchy.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_ServiceSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceSpec describes the attributes that a user creates on a service.", - Properties: map[string]spec.Schema{ - "ports": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "port", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ServicePort"), - }, - }, - }, - }, - }, - "selector": { - SchemaProps: spec.SchemaProps{ - Description: "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "clusterIP": { - SchemaProps: spec.SchemaProps{ - Description: "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - Type: []string{"string"}, - Format: "", - }, - }, - "type": { - SchemaProps: spec.SchemaProps{ - Description: "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types", - Type: []string{"string"}, - Format: "", - }, - }, - "externalIPs": { - SchemaProps: spec.SchemaProps{ - Description: "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "sessionAffinity": { - SchemaProps: spec.SchemaProps{ - Description: "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - Type: []string{"string"}, - Format: "", - }, - }, - "loadBalancerIP": { - SchemaProps: spec.SchemaProps{ - Description: "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - Type: []string{"string"}, - Format: "", - }, - }, - "loadBalancerSourceRanges": { - SchemaProps: spec.SchemaProps{ - Description: "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "externalName": { - SchemaProps: spec.SchemaProps{ - Description: "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", - Type: []string{"string"}, - Format: "", - }, - }, - "externalTrafficPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", - Type: []string{"string"}, - Format: "", - }, - }, - "healthCheckNodePort": { - SchemaProps: spec.SchemaProps{ - Description: "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. 
Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "publishNotReadyAddresses": { - SchemaProps: spec.SchemaProps{ - Description: "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "sessionAffinityConfig": { - SchemaProps: spec.SchemaProps{ - Description: "sessionAffinityConfig contains the configurations of session affinity.", - Ref: ref("k8s.io/api/core/v1.SessionAffinityConfig"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ServicePort", "k8s.io/api/core/v1.SessionAffinityConfig"}, - } -} - -func schema_k8sio_api_core_v1_ServiceStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServiceStatus represents the current status of a service.", - Properties: map[string]spec.Schema{ - "loadBalancer": { - SchemaProps: spec.SchemaProps{ - Description: "LoadBalancer contains the current status of the load-balancer, if one is present.", - Ref: ref("k8s.io/api/core/v1.LoadBalancerStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LoadBalancerStatus"}, - } -} - -func schema_k8sio_api_core_v1_SessionAffinityConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "SessionAffinityConfig represents the configurations of session affinity.", - Properties: map[string]spec.Schema{ - "clientIP": { - SchemaProps: spec.SchemaProps{ - Description: "clientIP contains the configurations of Client IP based session affinity.", - Ref: ref("k8s.io/api/core/v1.ClientIPConfig"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ClientIPConfig"}, - } -} - -func schema_k8sio_api_core_v1_StorageOSPersistentVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a StorageOS persistent volume resource.", - Properties: map[string]spec.Schema{ - "volumeName": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", - Type: []string{"string"}, - Format: "", - }, - }, - "volumeNamespace": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
\"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", - Ref: ref("k8s.io/api/core/v1.ObjectReference"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_StorageOSVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a StorageOS persistent volume resource.", - Properties: map[string]spec.Schema{ - "volumeName": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", - Type: []string{"string"}, - Format: "", - }, - }, - "volumeNamespace": { - SchemaProps: spec.SchemaProps{ - Description: "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "secretRef": { - SchemaProps: spec.SchemaProps{ - Description: "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. 
If not specified, default values will be attempted.", - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.LocalObjectReference"}, - } -} - -func schema_k8sio_api_core_v1_Sysctl(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Sysctl defines a kernel parameter to be set", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of a property to set", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value of a property to set", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "value"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_TCPSocketAction(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TCPSocketAction describes an action based on opening a socket", - Properties: map[string]spec.Schema{ - "port": { - SchemaProps: spec.SchemaProps{ - Description: "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", - Ref: ref("k8s.io/apimachinery/pkg/util/intstr.IntOrString"), - }, - }, - "host": { - SchemaProps: spec.SchemaProps{ - Description: "Optional: Host name to connect to, defaults to the pod IP.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"port"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/util/intstr.IntOrString"}, - } -} - -func schema_k8sio_api_core_v1_Taint(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Required. The taint key to be applied to a node.", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Required. The taint value corresponding to the taint key.", - Type: []string{"string"}, - Format: "", - }, - }, - "effect": { - SchemaProps: spec.SchemaProps{ - Description: "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", - Type: []string{"string"}, - Format: "", - }, - }, - "timeAdded": { - SchemaProps: spec.SchemaProps{ - Description: "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - }, - Required: []string{"key", "effect"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_k8sio_api_core_v1_Toleration(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "Key is the taint key that the toleration applies to. 
Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", - Type: []string{"string"}, - Format: "", - }, - }, - "operator": { - SchemaProps: spec.SchemaProps{ - Description: "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", - Type: []string{"string"}, - Format: "", - }, - }, - "value": { - SchemaProps: spec.SchemaProps{ - Description: "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", - Type: []string{"string"}, - Format: "", - }, - }, - "effect": { - SchemaProps: spec.SchemaProps{ - Description: "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", - Type: []string{"string"}, - Format: "", - }, - }, - "tolerationSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_TopologySelectorLabelRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.", - Properties: map[string]spec.Schema{ - "key": { - SchemaProps: spec.SchemaProps{ - Description: "The label key that the selector applies to.", - Type: []string{"string"}, - Format: "", - }, - }, - "values": { - SchemaProps: spec.SchemaProps{ - Description: "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"key", "values"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_TopologySelectorTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. 
This is an alpha feature and may change in the future.", - Properties: map[string]spec.Schema{ - "matchLabelExpressions": { - SchemaProps: spec.SchemaProps{ - Description: "A list of topology selector requirements by labels.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.TopologySelectorLabelRequirement"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.TopologySelectorLabelRequirement"}, - } -} - -func schema_k8sio_api_core_v1_TypedLocalObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", - Properties: map[string]spec.Schema{ - "apiGroup": { - SchemaProps: spec.SchemaProps{ - Description: "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - Type: []string{"string"}, - Format: "", - }, - }, - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is the type of resource being referenced", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name is the name of resource being referenced", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"kind", "name"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_Volume(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Volume represents a named volume in a pod that may be accessed by any container in the pod.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - Type: []string{"string"}, - Format: "", - }, - }, - "hostPath": { - SchemaProps: spec.SchemaProps{ - Description: "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), - }, - }, - "emptyDir": { - SchemaProps: spec.SchemaProps{ - Description: "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), - }, - }, - "gcePersistentDisk": { - SchemaProps: spec.SchemaProps{ - Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), - }, - }, - "awsElasticBlockStore": { - SchemaProps: spec.SchemaProps{ - Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), - }, - }, - "gitRepo": { - SchemaProps: spec.SchemaProps{ - Description: "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", - Ref: ref("k8s.io/api/core/v1.GitRepoVolumeSource"), - }, - }, - "secret": { - SchemaProps: spec.SchemaProps{ - Description: "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", - Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), - }, - }, - "nfs": { - SchemaProps: spec.SchemaProps{ - Description: "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), - }, - }, - "iscsi": { - SchemaProps: spec.SchemaProps{ - Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), - }, - }, - "glusterfs": { - SchemaProps: spec.SchemaProps{ - Description: "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), - }, - }, - "persistentVolumeClaim": { - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), - }, - }, - "rbd": { - SchemaProps: spec.SchemaProps{ - Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), - }, - }, - "flexVolume": { - SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), - }, - }, - "cinder": { - SchemaProps: spec.SchemaProps{ - Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), - }, - }, - "cephfs": { - SchemaProps: spec.SchemaProps{ - Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), - }, - }, - "flocker": { - SchemaProps: spec.SchemaProps{ - Description: "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", - Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), - }, - }, - "downwardAPI": { - SchemaProps: spec.SchemaProps{ - Description: "DownwardAPI represents downward API about the pod that should populate this volume", - Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), - }, - }, - "fc": { - SchemaProps: spec.SchemaProps{ - Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), - }, - }, - "azureFile": { - SchemaProps: spec.SchemaProps{ - Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), - }, - }, - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMap represents a configMap that should populate this volume", - Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), - }, - }, - "vsphereVolume": { - SchemaProps: spec.SchemaProps{ - Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), - }, - }, - "quobyte": { - SchemaProps: spec.SchemaProps{ - Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), - }, - }, - "azureDisk": { - SchemaProps: spec.SchemaProps{ - Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), - }, - }, - "photonPersistentDisk": { - SchemaProps: spec.SchemaProps{ - Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), - }, - }, - "projected": { - SchemaProps: spec.SchemaProps{ - Description: "Items for all in one resources secrets, configmaps, and downward API", - Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), - }, - }, - "portworxVolume": { - SchemaProps: spec.SchemaProps{ - Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), - }, - }, - "scaleIO": { - SchemaProps: spec.SchemaProps{ - Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), - }, - }, - "storageos": { - SchemaProps: spec.SchemaProps{ - Description: "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", - Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GitRepoVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", 
"k8s.io/api/core/v1.ISCSIVolumeSource", "k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, - } -} - -func schema_k8sio_api_core_v1_VolumeDevice(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "volumeDevice describes a mapping of a raw block device within a container.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name must match the name of a persistentVolumeClaim in the pod", - Type: []string{"string"}, - Format: "", - }, - }, - "devicePath": { - SchemaProps: spec.SchemaProps{ - Description: "devicePath is the path inside of the container that the device will be mapped to.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "devicePath"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_VolumeMount(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "VolumeMount describes a mounting of a Volume within a container.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "This must match the Name of a Volume.", - Type: []string{"string"}, - Format: "", - }, - }, - "readOnly": { - SchemaProps: spec.SchemaProps{ - Description: "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "mountPath": { - SchemaProps: spec.SchemaProps{ - Description: "Path within the container at which the volume should be mounted. Must not contain ':'.", - Type: []string{"string"}, - Format: "", - }, - }, - "subPath": { - SchemaProps: spec.SchemaProps{ - Description: "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", - Type: []string{"string"}, - Format: "", - }, - }, - "mountPropagation": { - SchemaProps: spec.SchemaProps{ - Description: "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. 
This field is beta in 1.10.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name", "mountPath"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_VolumeNodeAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", - Properties: map[string]spec.Schema{ - "required": { - SchemaProps: spec.SchemaProps{ - Description: "Required specifies hard node constraints that must be met.", - Ref: ref("k8s.io/api/core/v1.NodeSelector"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelector"}, - } -} - -func schema_k8sio_api_core_v1_VolumeProjection(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Projection that may be projected along with other supported volume types", - Properties: map[string]spec.Schema{ - "secret": { - SchemaProps: spec.SchemaProps{ - Description: "information about the secret data to project", - Ref: ref("k8s.io/api/core/v1.SecretProjection"), - }, - }, - "downwardAPI": { - SchemaProps: spec.SchemaProps{ - Description: "information about the downwardAPI data to project", - Ref: ref("k8s.io/api/core/v1.DownwardAPIProjection"), - }, - }, - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "information about the configMap data to project", - Ref: ref("k8s.io/api/core/v1.ConfigMapProjection"), - }, - }, - "serviceAccountToken": { - SchemaProps: spec.SchemaProps{ - Description: "information about the serviceAccountToken data to project", - Ref: ref("k8s.io/api/core/v1.ServiceAccountTokenProjection"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapProjection", "k8s.io/api/core/v1.DownwardAPIProjection", "k8s.io/api/core/v1.SecretProjection", "k8s.io/api/core/v1.ServiceAccountTokenProjection"}, - } -} - -func schema_k8sio_api_core_v1_VolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents the source of a volume to mount. Only one of its members may be specified.", - Properties: map[string]spec.Schema{ - "hostPath": { - SchemaProps: spec.SchemaProps{ - Description: "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - Ref: ref("k8s.io/api/core/v1.HostPathVolumeSource"), - }, - }, - "emptyDir": { - SchemaProps: spec.SchemaProps{ - Description: "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), - }, - }, - "gcePersistentDisk": { - SchemaProps: spec.SchemaProps{ - Description: "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - Ref: ref("k8s.io/api/core/v1.GCEPersistentDiskVolumeSource"), - }, - }, - "awsElasticBlockStore": { - SchemaProps: spec.SchemaProps{ - Description: "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - Ref: ref("k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource"), - }, - }, - "gitRepo": { - SchemaProps: spec.SchemaProps{ - Description: "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", - Ref: ref("k8s.io/api/core/v1.GitRepoVolumeSource"), - }, - }, - "secret": { - SchemaProps: spec.SchemaProps{ - Description: "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", - Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), - }, - }, - "nfs": { - SchemaProps: spec.SchemaProps{ - Description: "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - Ref: ref("k8s.io/api/core/v1.NFSVolumeSource"), - }, - }, - "iscsi": { - SchemaProps: spec.SchemaProps{ - Description: "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - Ref: ref("k8s.io/api/core/v1.ISCSIVolumeSource"), - }, - }, - "glusterfs": { - SchemaProps: spec.SchemaProps{ - Description: "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - Ref: ref("k8s.io/api/core/v1.GlusterfsVolumeSource"), - }, - }, - "persistentVolumeClaim": { - SchemaProps: spec.SchemaProps{ - Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), - }, - }, - "rbd": { - SchemaProps: spec.SchemaProps{ - Description: "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - Ref: ref("k8s.io/api/core/v1.RBDVolumeSource"), - }, - }, - "flexVolume": { - SchemaProps: spec.SchemaProps{ - Description: "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - Ref: ref("k8s.io/api/core/v1.FlexVolumeSource"), - }, - }, - "cinder": { - SchemaProps: spec.SchemaProps{ - Description: "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - Ref: ref("k8s.io/api/core/v1.CinderVolumeSource"), - }, - }, - "cephfs": { - SchemaProps: spec.SchemaProps{ - Description: "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - Ref: ref("k8s.io/api/core/v1.CephFSVolumeSource"), - }, - }, - "flocker": { - SchemaProps: spec.SchemaProps{ - Description: "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", - Ref: ref("k8s.io/api/core/v1.FlockerVolumeSource"), - }, - }, - "downwardAPI": { - SchemaProps: spec.SchemaProps{ - Description: "DownwardAPI represents downward API about the pod that should populate this volume", - Ref: ref("k8s.io/api/core/v1.DownwardAPIVolumeSource"), - }, - }, - "fc": { - SchemaProps: spec.SchemaProps{ - Description: "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - Ref: ref("k8s.io/api/core/v1.FCVolumeSource"), - }, - }, - "azureFile": { - SchemaProps: spec.SchemaProps{ - Description: "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - Ref: ref("k8s.io/api/core/v1.AzureFileVolumeSource"), - }, - }, - "configMap": { - SchemaProps: spec.SchemaProps{ - Description: "ConfigMap represents a configMap that should populate this volume", - Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), - }, - }, - "vsphereVolume": { - SchemaProps: spec.SchemaProps{ - Description: "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"), - }, - }, - "quobyte": { - SchemaProps: spec.SchemaProps{ - Description: "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - Ref: ref("k8s.io/api/core/v1.QuobyteVolumeSource"), - }, - }, - "azureDisk": { - SchemaProps: spec.SchemaProps{ - Description: "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - Ref: ref("k8s.io/api/core/v1.AzureDiskVolumeSource"), - }, - }, - "photonPersistentDisk": { - SchemaProps: spec.SchemaProps{ - Description: "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource"), - }, - }, - "projected": { - SchemaProps: spec.SchemaProps{ - Description: "Items for all in one resources secrets, configmaps, and downward API", - Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), - }, - }, - "portworxVolume": { - SchemaProps: spec.SchemaProps{ - Description: "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - Ref: ref("k8s.io/api/core/v1.PortworxVolumeSource"), - }, - }, - "scaleIO": { - SchemaProps: spec.SchemaProps{ - Description: "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - Ref: ref("k8s.io/api/core/v1.ScaleIOVolumeSource"), - }, - }, - "storageos": { - SchemaProps: spec.SchemaProps{ - Description: "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", - Ref: ref("k8s.io/api/core/v1.StorageOSVolumeSource"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource", "k8s.io/api/core/v1.AzureDiskVolumeSource", "k8s.io/api/core/v1.AzureFileVolumeSource", "k8s.io/api/core/v1.CephFSVolumeSource", "k8s.io/api/core/v1.CinderVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.DownwardAPIVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.FCVolumeSource", "k8s.io/api/core/v1.FlexVolumeSource", "k8s.io/api/core/v1.FlockerVolumeSource", "k8s.io/api/core/v1.GCEPersistentDiskVolumeSource", "k8s.io/api/core/v1.GitRepoVolumeSource", "k8s.io/api/core/v1.GlusterfsVolumeSource", "k8s.io/api/core/v1.HostPathVolumeSource", "k8s.io/api/core/v1.ISCSIVolumeSource", 
"k8s.io/api/core/v1.NFSVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.PhotonPersistentDiskVolumeSource", "k8s.io/api/core/v1.PortworxVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.QuobyteVolumeSource", "k8s.io/api/core/v1.RBDVolumeSource", "k8s.io/api/core/v1.ScaleIOVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource", "k8s.io/api/core/v1.StorageOSVolumeSource", "k8s.io/api/core/v1.VsphereVirtualDiskVolumeSource"}, - } -} - -func schema_k8sio_api_core_v1_VsphereVirtualDiskVolumeSource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Represents a vSphere volume resource.", - Properties: map[string]spec.Schema{ - "volumePath": { - SchemaProps: spec.SchemaProps{ - Description: "Path that identifies vSphere volume vmdk", - Type: []string{"string"}, - Format: "", - }, - }, - "fsType": { - SchemaProps: spec.SchemaProps{ - Description: "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - Type: []string{"string"}, - Format: "", - }, - }, - "storagePolicyName": { - SchemaProps: spec.SchemaProps{ - Description: "Storage Policy Based Management (SPBM) profile name.", - Type: []string{"string"}, - Format: "", - }, - }, - "storagePolicyID": { - SchemaProps: spec.SchemaProps{ - Description: "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"volumePath"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_k8sio_api_core_v1_WeightedPodAffinityTerm(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - Properties: map[string]spec.Schema{ - "weight": { - SchemaProps: spec.SchemaProps{ - Description: "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - "podAffinityTerm": { - SchemaProps: spec.SchemaProps{ - Description: "Required. A pod affinity term, associated with the corresponding weight.", - Ref: ref("k8s.io/api/core/v1.PodAffinityTerm"), - }, - }, - }, - Required: []string{"weight", "podAffinityTerm"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.PodAffinityTerm"}, - } -} - -func schema_pkg_apis_meta_v1_APIGroup(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "APIGroup contains the name, the supported versions, and the preferred version of a group.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name is the name of the group.", - Type: []string{"string"}, - Format: "", - }, - }, - "versions": { - SchemaProps: spec.SchemaProps{ - Description: "versions are the versions supported in this group.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"), - }, - }, - }, - }, - }, - "preferredVersion": { - SchemaProps: spec.SchemaProps{ - Description: "preferredVersion is the version preferred by the API server, which probably is the storage version.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"), - }, - }, - "serverAddressByClientCIDRs": { - SchemaProps: spec.SchemaProps{ - Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"), - }, - }, - }, - }, - }, - }, - Required: []string{"name", "versions"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery", "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"}, - } -} - -func schema_pkg_apis_meta_v1_APIGroupList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "groups": { - SchemaProps: spec.SchemaProps{ - Description: "groups is a list of APIGroup.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"), - }, - }, - }, - }, - }, - }, - Required: []string{"groups"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"}, - } -} - -func schema_pkg_apis_meta_v1_APIResource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "APIResource specifies the name of a resource and whether it is namespaced.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name is the plural name of the resource.", - Type: []string{"string"}, - Format: "", - }, - }, - "singularName": { - SchemaProps: spec.SchemaProps{ - Description: "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", - Type: []string{"string"}, - Format: "", - }, - }, - "namespaced": { - SchemaProps: spec.SchemaProps{ - Description: "namespaced indicates if a resource is namespaced or not.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "group": { - SchemaProps: spec.SchemaProps{ - Description: "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Description: "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", - Type: []string{"string"}, - Format: "", - }, - }, - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", - Type: []string{"string"}, - Format: "", - }, - }, - "verbs": { - SchemaProps: spec.SchemaProps{ - Description: "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "shortNames": { - SchemaProps: spec.SchemaProps{ - Description: "shortNames is a list of suggested short names of the resource.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "categories": { - SchemaProps: spec.SchemaProps{ - Description: "categories is a list of the grouped resources this resource belongs to (e.g. 
'all')", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"name", "singularName", "namespaced", "kind", "verbs"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_APIResourceList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "groupVersion": { - SchemaProps: spec.SchemaProps{ - Description: "groupVersion is the group and version this APIResourceList is for.", - Type: []string{"string"}, - Format: "", - }, - }, - "resources": { - SchemaProps: spec.SchemaProps{ - Description: "resources contains the name of the resources and if they are namespaced.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"), - }, - }, - }, - }, - }, - }, - Required: []string{"groupVersion", "resources"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"}, - } -} - -func schema_pkg_apis_meta_v1_APIVersions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "versions": { - SchemaProps: spec.SchemaProps{ - Description: "versions are the api versions that are available.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "serverAddressByClientCIDRs": { - SchemaProps: spec.SchemaProps{ - Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"), - }, - }, - }, - }, - }, - }, - Required: []string{"versions", "serverAddressByClientCIDRs"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"}, - } -} - -func schema_pkg_apis_meta_v1_CreateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "CreateOptions may be provided when creating an API object.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "dryRun": { - SchemaProps: spec.SchemaProps{ - Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "includeUninitialized": { - SchemaProps: spec.SchemaProps{ - Description: "If IncludeUninitialized is specified, the object may be returned without completing initialization.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_DeleteOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "DeleteOptions may be provided when deleting an API object.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "gracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "preconditions": { - SchemaProps: spec.SchemaProps{ - Description: "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"), - }, - }, - "orphanDependents": { - SchemaProps: spec.SchemaProps{ - Description: "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "propagationPolicy": { - SchemaProps: spec.SchemaProps{ - Description: "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - Type: []string{"string"}, - Format: "", - }, - }, - "dryRun": { - SchemaProps: spec.SchemaProps{ - Description: "When present, indicates that modifications should not be persisted. 
An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"}, - } -} - -func schema_pkg_apis_meta_v1_Duration(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. In particular, it marshals into strings, which can be used as map keys in json.", - Properties: map[string]spec.Schema{ - "Duration": { - SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int64", - }, - }, - }, - Required: []string{"Duration"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_ExportOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ExportOptions is the query options to the standard REST get call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "export": { - SchemaProps: spec.SchemaProps{ - Description: "Should this value be exported. Export strips fields that a user can not specify.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "exact": { - SchemaProps: spec.SchemaProps{ - Description: "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"export", "exact"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_GetOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GetOptions is the standard query options to the standard REST get call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "resourceVersion": { - SchemaProps: spec.SchemaProps{ - Description: "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - Type: []string{"string"}, - Format: "", - }, - }, - "includeUninitialized": { - SchemaProps: spec.SchemaProps{ - Description: "If true, partially initialized resources are included in the response.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_GroupKind(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types", - Properties: map[string]spec.Schema{ - "group": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "kind": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"group", "kind"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_GroupResource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types", - Properties: map[string]spec.Schema{ - "group": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "resource": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"group", "resource"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_GroupVersion(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GroupVersion contains the \"group\" and the \"version\", which uniquely identifies the API.", - Properties: map[string]spec.Schema{ - "group": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"group", "version"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GroupVersion contains the \"group/version\" and \"version\" string of a version. 
It is made a struct to keep extensibility.", - Properties: map[string]spec.Schema{ - "groupVersion": { - SchemaProps: spec.SchemaProps{ - Description: "groupVersion specifies the API group and version in the form \"group/version\"", - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Description: "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"groupVersion", "version"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_GroupVersionKind(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling", - Properties: map[string]spec.Schema{ - "group": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "kind": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"group", "version", "kind"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_GroupVersionResource(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling", - Properties: map[string]spec.Schema{ - "group": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "resource": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"group", "version", "resource"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_Initializer(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Initializer is information about an initializer that has not yet completed.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "name of the process that is responsible for initializing this object.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"name"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_Initializers(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Initializers tracks the progress of initialization.", - Properties: map[string]spec.Schema{ - "pending": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "name", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Pending is a list of initializers that must execute in order before this object is visible. 
When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Initializer"), - }, - }, - }, - }, - }, - "result": { - SchemaProps: spec.SchemaProps{ - Description: "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Status"), - }, - }, - }, - Required: []string{"pending"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Initializer", "k8s.io/apimachinery/pkg/apis/meta/v1.Status"}, - } -} - -func schema_pkg_apis_meta_v1_InternalEvent(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "InternalEvent makes watch.Event versioned", - Properties: map[string]spec.Schema{ - "Type": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "Object": { - SchemaProps: spec.SchemaProps{ - Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *api.Status is recommended; other types may make sense\n depending on context.", - Ref: ref("k8s.io/apimachinery/pkg/runtime.Object"), - }, - }, - }, - Required: []string{"Type", "Object"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/runtime.Object"}, - } -} - -func schema_pkg_apis_meta_v1_LabelSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - Properties: map[string]spec.Schema{ - "matchLabels": { - SchemaProps: spec.SchemaProps{ - Description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "matchExpressions": { - SchemaProps: spec.SchemaProps{ - Description: "matchExpressions is a list of label selector requirements. 
The requirements are ANDed.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"}, - } -} - -func schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - Properties: map[string]spec.Schema{ - "key": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "key is the label key that the selector applies to.", - Type: []string{"string"}, - Format: "", - }, - }, - "operator": { - SchemaProps: spec.SchemaProps{ - Description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", - Type: []string{"string"}, - Format: "", - }, - }, - "values": { - SchemaProps: spec.SchemaProps{ - Description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"key", "operator"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "List holds a list of objects, which may not be known by the server.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "List of objects", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"}, - } -} - -func schema_pkg_apis_meta_v1_ListMeta(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - Properties: map[string]spec.Schema{ - "selfLink": { - SchemaProps: spec.SchemaProps{ - Description: "selfLink is a URL representing this object. Populated by the system. Read-only.", - Type: []string{"string"}, - Format: "", - }, - }, - "resourceVersion": { - SchemaProps: spec.SchemaProps{ - Description: "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", - Type: []string{"string"}, - Format: "", - }, - }, - "continue": { - SchemaProps: spec.SchemaProps{ - Description: "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_ListOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ListOptions is the query options to a standard REST list call.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "labelSelector": { - SchemaProps: spec.SchemaProps{ - Description: "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - Type: []string{"string"}, - Format: "", - }, - }, - "fieldSelector": { - SchemaProps: spec.SchemaProps{ - Description: "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - Type: []string{"string"}, - Format: "", - }, - }, - "includeUninitialized": { - SchemaProps: spec.SchemaProps{ - Description: "If true, partially initialized resources are included in the response.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "watch": { - SchemaProps: spec.SchemaProps{ - Description: "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "resourceVersion": { - SchemaProps: spec.SchemaProps{ - Description: "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - Type: []string{"string"}, - Format: "", - }, - }, - "timeoutSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "limit": { - SchemaProps: spec.SchemaProps{ - Description: "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "continue": { - SchemaProps: spec.SchemaProps{ - Description: "The continue option should be set when retrieving more results from the server. 
Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_MicroTime(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MicroTime is version of Time with microsecond level precision.", - Type: v1.MicroTime{}.OpenAPISchemaType(), - Format: v1.MicroTime{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Type: []string{"string"}, - Format: "", - }, - }, - "generateName": { - SchemaProps: spec.SchemaProps{ - Description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", - Type: []string{"string"}, - Format: "", - }, - }, - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - Type: []string{"string"}, - Format: "", - }, - }, - "selfLink": { - SchemaProps: spec.SchemaProps{ - Description: "SelfLink is a URL representing this object. Populated by the system. Read-only.", - Type: []string{"string"}, - Format: "", - }, - }, - "uid": { - SchemaProps: spec.SchemaProps{ - Description: "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - Type: []string{"string"}, - Format: "", - }, - }, - "resourceVersion": { - SchemaProps: spec.SchemaProps{ - Description: "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", - Type: []string{"string"}, - Format: "", - }, - }, - "generation": { - SchemaProps: spec.SchemaProps{ - Description: "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "creationTimestamp": { - SchemaProps: spec.SchemaProps{ - Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "deletionTimestamp": { - SchemaProps: spec.SchemaProps{ - Description: "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. 
The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "deletionGracePeriodSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "labels": { - SchemaProps: spec.SchemaProps{ - Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "annotations": { - SchemaProps: spec.SchemaProps{ - Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "ownerReferences": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-merge-key": "uid", - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"), - }, - }, - }, - }, - }, - "initializers": { - SchemaProps: spec.SchemaProps{ - Description: "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. 
Once it is empty, it may not be modified further by any user.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Initializers"), - }, - }, - "finalizers": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-patch-strategy": "merge", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "clusterName": { - SchemaProps: spec.SchemaProps{ - Description: "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Initializers", "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, - } -} - -func schema_pkg_apis_meta_v1_OwnerReference(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", - Properties: map[string]spec.Schema{ - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "API version of the referent.", - Type: []string{"string"}, - Format: "", - }, - }, - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - Type: []string{"string"}, - Format: "", - }, - }, - "uid": { - SchemaProps: spec.SchemaProps{ - Description: "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - Type: []string{"string"}, - Format: "", - }, - }, - "controller": { - SchemaProps: spec.SchemaProps{ - Description: "If true, this reference points to the managing controller.", - Type: []string{"boolean"}, - Format: "", - }, - }, - "blockOwnerDeletion": { - SchemaProps: spec.SchemaProps{ - Description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. 
To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", - Type: []string{"boolean"}, - Format: "", - }, - }, - }, - Required: []string{"apiVersion", "kind", "name", "uid"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_Patch(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", - Properties: map[string]spec.Schema{}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_Preconditions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", - Properties: map[string]spec.Schema{ - "uid": { - SchemaProps: spec.SchemaProps{ - Description: "Specifies the target UID.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_RootPaths(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".", - Properties: map[string]spec.Schema{ - "paths": { - SchemaProps: spec.SchemaProps{ - Description: "paths are the paths available at root.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"paths"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", - Properties: map[string]spec.Schema{ - "clientCIDR": { - SchemaProps: spec.SchemaProps{ - Description: "The CIDR with which clients can match their IP to figure out the server address that they should use.", - Type: []string{"string"}, - Format: "", - }, - }, - "serverAddress": { - SchemaProps: spec.SchemaProps{ - Description: "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"clientCIDR", "serverAddress"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_Status(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Status is a return value for calls that don't return other objects.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human-readable description of the status of this operation.", - Type: []string{"string"}, - Format: "", - }, - }, - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", - Type: []string{"string"}, - Format: "", - }, - }, - "details": { - SchemaProps: spec.SchemaProps{ - Description: "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"), - }, - }, - "code": { - SchemaProps: spec.SchemaProps{ - Description: "Suggested HTTP return code for this status, 0 if not set.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"}, - } -} - -func schema_pkg_apis_meta_v1_StatusCause(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", - Properties: map[string]spec.Schema{ - "reason": { - SchemaProps: spec.SchemaProps{ - Description: "A machine-readable description of the cause of the error. If this value is empty there is no information available.", - Type: []string{"string"}, - Format: "", - }, - }, - "message": { - SchemaProps: spec.SchemaProps{ - Description: "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", - Type: []string{"string"}, - Format: "", - }, - }, - "field": { - SchemaProps: spec.SchemaProps{ - Description: "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. 
Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_StatusDetails(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", - Type: []string{"string"}, - Format: "", - }, - }, - "group": { - SchemaProps: spec.SchemaProps{ - Description: "The group attribute of the resource associated with the status StatusReason.", - Type: []string{"string"}, - Format: "", - }, - }, - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "uid": { - SchemaProps: spec.SchemaProps{ - Description: "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - Type: []string{"string"}, - Format: "", - }, - }, - "causes": { - SchemaProps: spec.SchemaProps{ - Description: "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"), - }, - }, - }, - }, - }, - "retryAfterSeconds": { - SchemaProps: spec.SchemaProps{ - Description: "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"}, - } -} - -func schema_pkg_apis_meta_v1_Time(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers.", - Type: v1.Time{}.OpenAPISchemaType(), - Format: v1.Time{}.OpenAPISchemaFormat(), - }, - }, - } -} - -func schema_pkg_apis_meta_v1_Timestamp(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Timestamp is a struct that is equivalent to Time, but intended for protobuf marshalling/unmarshalling. It is generated into a serialization that matches Time. Do not use in Go structs.", - Properties: map[string]spec.Schema{ - "seconds": { - SchemaProps: spec.SchemaProps{ - Description: "Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.", - Type: []string{"integer"}, - Format: "int64", - }, - }, - "nanos": { - SchemaProps: spec.SchemaProps{ - Description: "Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. This field may be limited in precision depending on context.", - Type: []string{"integer"}, - Format: "int32", - }, - }, - }, - Required: []string{"seconds", "nanos"}, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_TypeMeta(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_UpdateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "UpdateOptions may be provided when updating an API object.", - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "dryRun": { - SchemaProps: spec.SchemaProps{ - Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{}, - } -} - -func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Event represents a single event to a watched resource.", - Properties: map[string]spec.Schema{ - "type": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "object": { - SchemaProps: spec.SchemaProps{ - Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.", - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), - }, - }, - }, - Required: []string{"type", "object"}, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/runtime.RawExtension"}, - } -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/register.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/register.go deleted file mode 100644 index 60b8add..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/register.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/coreos/prometheus-operator/pkg/apis/monitoring" -) - -// SchemeGroupVersion is the group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: monitoring.GroupName, Version: Version} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. 
- SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Prometheus{}, - &PrometheusList{}, - &ServiceMonitor{}, - &ServiceMonitorList{}, - &Alertmanager{}, - &AlertmanagerList{}, - &PrometheusRule{}, - &PrometheusRuleList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/types.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/types.go deleted file mode 100644 index 2ff0573..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/types.go +++ /dev/null @@ -1,872 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - Version = "v1" - - PrometheusesKind = "Prometheus" - PrometheusName = "prometheuses" - PrometheusKindKey = "prometheus" - - AlertmanagersKind = "Alertmanager" - AlertmanagerName = "alertmanagers" - AlertManagerKindKey = "alertmanager" - - ServiceMonitorsKind = "ServiceMonitor" - ServiceMonitorName = "servicemonitors" - ServiceMonitorKindKey = "servicemonitor" - - PrometheusRuleKind = "PrometheusRule" - PrometheusRuleName = "prometheusrules" - PrometheusRuleKindKey = "prometheusrule" -) - -// Prometheus defines a Prometheus deployment. -// +genclient -// +k8s:openapi-gen=true -type Prometheus struct { - metav1.TypeMeta `json:",inline"` - // Standard object’s metadata. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata - // +k8s:openapi-gen=false - metav1.ObjectMeta `json:"metadata,omitempty"` - // Specification of the desired behavior of the Prometheus cluster. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status - Spec PrometheusSpec `json:"spec"` - // Most recent observed status of the Prometheus cluster. Read-only. Not - // included when requesting from the apiserver, only from the Prometheus - // Operator API itself. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status - Status *PrometheusStatus `json:"status,omitempty"` -} - -// PrometheusList is a list of Prometheuses. 
-// +k8s:openapi-gen=true -type PrometheusList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata - metav1.ListMeta `json:"metadata,omitempty"` - // List of Prometheuses - Items []*Prometheus `json:"items"` -} - -// PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info: -// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status -// +k8s:openapi-gen=true -type PrometheusSpec struct { - // Standard object’s metadata. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata - // Metadata Labels and Annotations gets propagated to the prometheus pods. - PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"` - // ServiceMonitors to be selected for target discovery. - ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` - // Namespaces to be selected for ServiceMonitor discovery. If nil, only - // check own namespace. - ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` - // Version of Prometheus to be deployed. - Version string `json:"version,omitempty"` - // Tag of Prometheus container image to be deployed. Defaults to the value of `version`. - // Version is ignored if Tag is set. - Tag string `json:"tag,omitempty"` - // SHA of Prometheus container image to be deployed. Defaults to the value of `version`. - // Similar to a tag, but the SHA explicitly deploys an immutable container image. - // Version and Tag are ignored if SHA is set. - SHA string `json:"sha,omitempty"` - // When a Prometheus deployment is paused, no actions except for deletion - // will be performed on the underlying objects. - Paused bool `json:"paused,omitempty"` - // Image if specified has precedence over baseImage, tag and sha - // combinations. Specifying the version is still necessary to ensure the - // Prometheus Operator knows what version of Prometheus is being - // configured. - Image *string `json:"image,omitempty"` - // Base image to use for a Prometheus deployment. - BaseImage string `json:"baseImage,omitempty"` - // An optional list of references to secrets in the same namespace - // to use for pulling prometheus and alertmanager images from registries - // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod - ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` - // Number of instances to deploy for a Prometheus deployment. - Replicas *int32 `json:"replicas,omitempty"` - // Name of Prometheus external label used to denote replica name. - // Defaults to the value of `prometheus_replica`. - ReplicaExternalLabelName string `json:"replicaExternalLabelName,omitempty"` - // Time duration Prometheus shall retain data for. Default is '24h', - // and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years). - Retention string `json:"retention,omitempty"` - // Log level for Prometheus to be configured with. - LogLevel string `json:"logLevel,omitempty"` - // Log format for Prometheus to be configured with. - LogFormat string `json:"logFormat,omitempty"` - // Interval between consecutive scrapes. - ScrapeInterval string `json:"scrapeInterval,omitempty"` - // Interval between consecutive evaluations. 
- EvaluationInterval string `json:"evaluationInterval,omitempty"` - // /--rules.*/ command-line arguments. - Rules Rules `json:"rules,omitempty"` - // The labels to add to any time series or alerts when communicating with - // external systems (federation, remote storage, Alertmanager). - ExternalLabels map[string]string `json:"externalLabels,omitempty"` - // Enable access to prometheus web admin API. Defaults to the value of `false`. - // WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, - // shutdown Prometheus, and more. Enabling this should be done with care and the - // user is advised to add additional authentication authorization via a proxy to - // ensure only clients authorized to perform these actions can do so. - // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis - EnableAdminAPI bool `json:"enableAdminAPI,omitempty"` - // The external URL the Prometheus instances will be available under. This is - // necessary to generate correct URLs. This is necessary if Prometheus is not - // served from root of a DNS name. - ExternalURL string `json:"externalUrl,omitempty"` - // The route prefix Prometheus registers HTTP handlers for. This is useful, - // if using ExternalURL and a proxy is rewriting HTTP routes of a request, - // and the actual ExternalURL is still true, but the server serves requests - // under a different route prefix. For example for use with `kubectl proxy`. - RoutePrefix string `json:"routePrefix,omitempty"` - // QuerySpec defines the query command line flags when starting Prometheus. - Query *QuerySpec `json:"query,omitempty"` - // Storage spec to specify how storage shall be used. - Storage *StorageSpec `json:"storage,omitempty"` - // A selector to select which PrometheusRules to mount for loading alerting - // rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus - // Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom - // resources selected by RuleSelector. Make sure it does not match any config - // maps that you do not want to be migrated. - RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` - // Namespaces to be selected for PrometheusRules discovery. If unspecified, only - // the same namespace as the Prometheus object is in is used. - RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"` - // Define details regarding alerting. - Alerting *AlertingSpec `json:"alerting,omitempty"` - // Define resources requests and limits for single Pods. - Resources v1.ResourceRequirements `json:"resources,omitempty"` - // Define which Nodes the Pods are scheduled on. - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // ServiceAccountName is the name of the ServiceAccount to use to run the - // Prometheus Pods. - ServiceAccountName string `json:"serviceAccountName,omitempty"` - // Secrets is a list of Secrets in the same namespace as the Prometheus - // object, which shall be mounted into the Prometheus Pods. - // The Secrets are mounted into /etc/prometheus/secrets/. - Secrets []string `json:"secrets,omitempty"` - // ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus - // object, which shall be mounted into the Prometheus Pods. - // The ConfigMaps are mounted into /etc/prometheus/configmaps/. - ConfigMaps []string `json:"configMaps,omitempty"` - // If specified, the pod's scheduling constraints. 
- Affinity *v1.Affinity `json:"affinity,omitempty"` - // If specified, the pod's tolerations. - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - // If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way. - RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` - // If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way. - RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"` - // SecurityContext holds pod-level security attributes and common container settings. - // This defaults to non root user with uid 1000 and gid 2000 for Prometheus >v2.0 and - // default PodSecurityContext for other versions. - SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` - // ListenLocal makes the Prometheus server listen on loopback, so that it - // does not bind against the Pod IP. - ListenLocal bool `json:"listenLocal,omitempty"` - // Containers allows injecting additional containers. This is meant to - // allow adding an authentication proxy to a Prometheus pod. - Containers []v1.Container `json:"containers,omitempty"` - // AdditionalScrapeConfigs allows specifying a key of a Secret containing - // additional Prometheus scrape configurations. Scrape configurations - // specified are appended to the configurations generated by the Prometheus - // Operator. Job configurations specified must have the form as specified - // in the official Prometheus documentation: - // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. - // As scrape configs are appended, the user is responsible to make sure it - // is valid. Note that using this feature may expose the possibility to - // break upgrades of Prometheus. It is advised to review Prometheus release - // notes to ensure that no incompatible scrape configs are going to break - // Prometheus after the upgrade. - AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"` - // AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing - // additional Prometheus alert relabel configurations. Alert relabel configurations - // specified are appended to the configurations generated by the Prometheus - // Operator. Alert relabel configurations specified must have the form as specified - // in the official Prometheus documentation: - // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. - // As alert relabel configs are appended, the user is responsible to make sure it - // is valid. Note that using this feature may expose the possibility to - // break upgrades of Prometheus. It is advised to review Prometheus release - // notes to ensure that no incompatible alert relabel configs are going to break - // Prometheus after the upgrade. - AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"` - // AdditionalAlertManagerConfigs allows specifying a key of a Secret containing - // additional Prometheus AlertManager configurations. AlertManager configurations - // specified are appended to the configurations generated by the Prometheus - // Operator. Job configurations specified must have the form as specified - // in the official Prometheus documentation: - // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. 
- // As AlertManager configs are appended, the user is responsible to make sure it - // is valid. Note that using this feature may expose the possibility to - // break upgrades of Prometheus. It is advised to review Prometheus release - // notes to ensure that no incompatible AlertManager configs are going to break - // Prometheus after the upgrade. - AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"` - // APIServerConfig allows specifying a host and auth methods to access apiserver. - // If left empty, Prometheus is assumed to run inside of the cluster - // and will discover API servers automatically and use the pod's CA certificate - // and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. - APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"` - // Thanos configuration allows configuring various aspects of a Prometheus - // server in a Thanos environment. - // - // This section is experimental, it may change significantly without - // deprecation notice in any release. - // - // This is experimental and may change significantly without backward - // compatibility in any release. - Thanos *ThanosSpec `json:"thanos,omitempty"` - // Priority class assigned to the Pods - PriorityClassName string `json:"priorityClassName,omitempty"` -} - -// PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not -// included when requesting from the apiserver, only from the Prometheus -// Operator API itself. More info: -// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status -// +k8s:openapi-gen=true -type PrometheusStatus struct { - // Represents whether any actions on the underlaying managed objects are - // being performed. Only delete actions will be performed. - Paused bool `json:"paused"` - // Total number of non-terminated pods targeted by this Prometheus deployment - // (their labels match the selector). - Replicas int32 `json:"replicas"` - // Total number of non-terminated pods targeted by this Prometheus deployment - // that have the desired version spec. - UpdatedReplicas int32 `json:"updatedReplicas"` - // Total number of available pods (ready for at least minReadySeconds) - // targeted by this Prometheus deployment. - AvailableReplicas int32 `json:"availableReplicas"` - // Total number of unavailable pods targeted by this Prometheus deployment. - UnavailableReplicas int32 `json:"unavailableReplicas"` -} - -// AlertingSpec defines parameters for alerting configuration of Prometheus servers. -// +k8s:openapi-gen=true -type AlertingSpec struct { - // AlertmanagerEndpoints Prometheus should fire alerts against. - Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"` -} - -// StorageSpec defines the configured storage for a group Prometheus servers. -// If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used. -// +k8s:openapi-gen=true -type StorageSpec struct { - // EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More - // info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir - EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` - // A PVC spec to be used by the Prometheus StatefulSets. 
- VolumeClaimTemplate v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` -} - -// QuerySpec defines the query command line flags when starting Prometheus. -// +k8s:openapi-gen=true -type QuerySpec struct { - // The delta difference allowed for retrieving metrics during expression evaluations. - LookbackDelta *string `json:"lookbackDelta,omitempty"` - // Number of concurrent queries that can be run at once. - MaxConcurrency *int32 `json:"maxConcurrency,omitempty"` - // Maximum time a query may take before being aborted. - Timeout *string `json:"timeout,omitempty"` -} - -// ThanosSpec defines parameters for a Prometheus server within a Thanos deployment. -// +k8s:openapi-gen=true -type ThanosSpec struct { - // Peers is a DNS name for Thanos to discover peers through. - Peers *string `json:"peers,omitempty"` - // Image if specified has precedence over baseImage, tag and sha - // combinations. Specifying the version is still necessary to ensure the - // Prometheus Operator knows what version of Thanos is being - // configured. - Image *string `json:"image,omitempty"` - // Version describes the version of Thanos to use. - Version *string `json:"version,omitempty"` - // Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`. - // Version is ignored if Tag is set. - Tag *string `json:"tag,omitempty"` - // SHA of Thanos container image to be deployed. Defaults to the value of `version`. - // Similar to a tag, but the SHA explicitly deploys an immutable container image. - // Version and Tag are ignored if SHA is set. - SHA *string `json:"sha,omitempty"` - // Thanos base image if other than default. - BaseImage *string `json:"baseImage,omitempty"` - // Resources defines the resource requirements for the Thanos sidecar. - // If not provided, no requests/limits will be set - Resources v1.ResourceRequirements `json:"resources,omitempty"` - // Deprecated: GCS should be configured with an ObjectStorageConfig secret - // starting with Thanos v0.2.0. This field will be removed. - GCS *ThanosGCSSpec `json:"gcs,omitempty"` - // Deprecated: S3 should be configured with an ObjectStorageConfig secret - // starting with Thanos v0.2.0. This field will be removed. - S3 *ThanosS3Spec `json:"s3,omitempty"` - // ObjectStorageConfig configures object storage in Thanos. - ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"` - // Explicit (external) host:port address to advertise for gRPC StoreAPI in gossip cluster. - // If empty, 'grpc-address' will be used. - GrpcAdvertiseAddress *string `json:"grpcAdvertiseAddress,omitempty"` - // Explicit (external) ip:port address to advertise for gossip in gossip cluster. - // Used internally for membership only. - ClusterAdvertiseAddress *string `json:"clusterAdvertiseAddress,omitempty"` -} - -// Deprecated: ThanosGCSSpec should be configured with an ObjectStorageConfig -// secret starting with Thanos v0.2.0. ThanosGCSSpec will be removed. -// -// +k8s:openapi-gen=true -type ThanosGCSSpec struct { - // Google Cloud Storage bucket name for stored blocks. If empty it won't - // store any block inside Google Cloud Storage. - Bucket *string `json:"bucket,omitempty"` - // Secret to access our Bucket. - SecretKey *v1.SecretKeySelector `json:"credentials,omitempty"` -} - -// Deprecated: ThanosS3Spec should be configured with an ObjectStorageConfig -// secret starting with Thanos v0.2.0. ThanosS3Spec will be removed. 
-// -// +k8s:openapi-gen=true -type ThanosS3Spec struct { - // S3-Compatible API bucket name for stored blocks. - Bucket *string `json:"bucket,omitempty"` - // S3-Compatible API endpoint for stored blocks. - Endpoint *string `json:"endpoint,omitempty"` - // AccessKey for an S3-Compatible API. - AccessKey *v1.SecretKeySelector `json:"accessKey,omitempty"` - // SecretKey for an S3-Compatible API. - SecretKey *v1.SecretKeySelector `json:"secretKey,omitempty"` - // Whether to use an insecure connection with an S3-Compatible API. - Insecure *bool `json:"insecure,omitempty"` - // Whether to use S3 Signature Version 2; otherwise Signature Version 4 will be used. - SignatureVersion2 *bool `json:"signatureVersion2,omitempty"` - // Whether to use Server Side Encryption - EncryptSSE *bool `json:"encryptsse,omitempty"` -} - -// RemoteWriteSpec defines the remote_write configuration for prometheus. -// +k8s:openapi-gen=true -type RemoteWriteSpec struct { - //The URL of the endpoint to send samples to. - URL string `json:"url"` - //Timeout for requests to the remote write endpoint. - RemoteTimeout string `json:"remoteTimeout,omitempty"` - //The list of remote write relabel configurations. - WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"` - //BasicAuth for the URL. - BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // File to read bearer token for remote write. - BearerToken string `json:"bearerToken,omitempty"` - // File to read bearer token for remote write. - BearerTokenFile string `json:"bearerTokenFile,omitempty"` - // TLS Config to use for remote write. - TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` - //Optional ProxyURL - ProxyURL string `json:"proxyUrl,omitempty"` - // QueueConfig allows tuning of the remote write queue parameters. - QueueConfig *QueueConfig `json:"queueConfig,omitempty"` -} - -// QueueConfig allows the tuning of remote_write queue_config parameters. This object -// is referenced in the RemoteWriteSpec object. -// +k8s:openapi-gen=true -type QueueConfig struct { - // Capacity is the number of samples to buffer per shard before we start dropping them. - Capacity int `json:"capacity,omitempty"` - // MinShards is the minimum number of shards, i.e. amount of concurrency. - MinShards int `json:"minShards,omitempty"` - // MaxShards is the maximum number of shards, i.e. amount of concurrency. - MaxShards int `json:"maxShards,omitempty"` - // MaxSamplesPerSend is the maximum number of samples per send. - MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"` - // BatchSendDeadline is the maximum time a sample will wait in buffer. - BatchSendDeadline string `json:"batchSendDeadline,omitempty"` - // MaxRetries is the maximum number of times to retry a batch on recoverable errors. - MaxRetries int `json:"maxRetries,omitempty"` - // MinBackoff is the initial retry delay. Gets doubled for every retry. - MinBackoff string `json:"minBackoff,omitempty"` - // MaxBackoff is the maximum retry delay. - MaxBackoff string `json:"maxBackoff,omitempty"` -} - -// RemoteReadSpec defines the remote_read configuration for prometheus. -// +k8s:openapi-gen=true -type RemoteReadSpec struct { - //The URL of the endpoint to send samples to. - URL string `json:"url"` - //An optional list of equality matchers which have to be present - // in a selector to query the remote read endpoint. - RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"` - //Timeout for requests to the remote read endpoint. 
- RemoteTimeout string `json:"remoteTimeout,omitempty"` - //Whether reads should be made for queries for time ranges that - // the local storage should have complete data for. - ReadRecent bool `json:"readRecent,omitempty"` - //BasicAuth for the URL. - BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // bearer token for remote read. - BearerToken string `json:"bearerToken,omitempty"` - // File to read bearer token for remote read. - BearerTokenFile string `json:"bearerTokenFile,omitempty"` - // TLS Config to use for remote read. - TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` - //Optional ProxyURL - ProxyURL string `json:"proxyUrl,omitempty"` -} - -// RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. -// It defines ``-section of Prometheus configuration. -// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs -// +k8s:openapi-gen=true -type RelabelConfig struct { - //The source labels select values from existing labels. Their content is concatenated - //using the configured separator and matched against the configured regular expression - //for the replace, keep, and drop actions. - SourceLabels []string `json:"sourceLabels,omitempty"` - //Separator placed between concatenated source label values. default is ';'. - Separator string `json:"separator,omitempty"` - //Label to which the resulting value is written in a replace action. - //It is mandatory for replace actions. Regex capture groups are available. - TargetLabel string `json:"targetLabel,omitempty"` - //Regular expression against which the extracted value is matched. defailt is '(.*)' - Regex string `json:"regex,omitempty"` - // Modulus to take of the hash of the source label values. - Modulus uint64 `json:"modulus,omitempty"` - //Replacement value against which a regex replace is performed if the - //regular expression matches. Regex capture groups are available. Default is '$1' - Replacement string `json:"replacement,omitempty"` - // Action to perform based on regex matching. Default is 'replace' - Action string `json:"action,omitempty"` -} - -// APIServerConfig defines a host and auth methods to access apiserver. -// More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config -// +k8s:openapi-gen=true -type APIServerConfig struct { - // Host of apiserver. - // A valid string consisting of a hostname or IP followed by an optional port number - Host string `json:"host"` - // BasicAuth allow an endpoint to authenticate over basic authentication - BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // Bearer token for accessing apiserver. - BearerToken string `json:"bearerToken,omitempty"` - // File to read bearer token for accessing apiserver. - BearerTokenFile string `json:"bearerTokenFile,omitempty"` - // TLS Config to use for accessing apiserver. - TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` -} - -// AlertmanagerEndpoints defines a selection of a single Endpoints object -// containing alertmanager IPs to fire alerts against. -// +k8s:openapi-gen=true -type AlertmanagerEndpoints struct { - // Namespace of Endpoints object. - Namespace string `json:"namespace"` - // Name of Endpoints object in Namespace. - Name string `json:"name"` - // Port the Alertmanager API is exposed on. - Port intstr.IntOrString `json:"port"` - // Scheme to use when firing alerts. - Scheme string `json:"scheme,omitempty"` - // Prefix for the HTTP path alerts are pushed to. 
- PathPrefix string `json:"pathPrefix,omitempty"` - // TLS Config to use for alertmanager connection. - TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` - // BearerTokenFile to read from filesystem to use when authenticating to - // Alertmanager. - BearerTokenFile string `json:"bearerTokenFile,omitempty"` -} - -// ServiceMonitor defines monitoring for a set of services. -// +genclient -// +k8s:openapi-gen=true -type ServiceMonitor struct { - metav1.TypeMeta `json:",inline"` - // Standard object’s metadata. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata - // +k8s:openapi-gen=false - metav1.ObjectMeta `json:"metadata,omitempty"` - // Specification of desired Service selection for target discrovery by - // Prometheus. - Spec ServiceMonitorSpec `json:"spec"` -} - -// ServiceMonitorSpec contains specification parameters for a ServiceMonitor. -// +k8s:openapi-gen=true -type ServiceMonitorSpec struct { - // The label to use to retrieve the job name from. - JobLabel string `json:"jobLabel,omitempty"` - // TargetLabels transfers labels on the Kubernetes Service onto the target. - TargetLabels []string `json:"targetLabels,omitempty"` - // PodTargetLabels transfers labels on the Kubernetes Pod onto the target. - PodTargetLabels []string `json:"podTargetLabels,omitempty"` - // A list of endpoints allowed as part of this ServiceMonitor. - Endpoints []Endpoint `json:"endpoints"` - // Selector to select Endpoints objects. - Selector metav1.LabelSelector `json:"selector"` - // Selector to select which namespaces the Endpoints objects are discovered from. - NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` - // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. - SampleLimit uint64 `json:"sampleLimit,omitempty"` -} - -// Endpoint defines a scrapeable endpoint serving Prometheus metrics. -// +k8s:openapi-gen=true -type Endpoint struct { - // Name of the service port this endpoint refers to. Mutually exclusive with targetPort. - Port string `json:"port,omitempty"` - // Name or number of the target port of the endpoint. Mutually exclusive with port. - TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` - // HTTP path to scrape for metrics. - Path string `json:"path,omitempty"` - // HTTP scheme to use for scraping. - Scheme string `json:"scheme,omitempty"` - // Optional HTTP URL parameters - Params map[string][]string `json:"params,omitempty"` - // Interval at which metrics should be scraped - Interval string `json:"interval,omitempty"` - // Timeout after which the scrape is ended - ScrapeTimeout string `json:"scrapeTimeout,omitempty"` - // TLS configuration to use when scraping the endpoint - TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` - // File to read bearer token for scraping targets. - BearerTokenFile string `json:"bearerTokenFile,omitempty"` - // HonorLabels chooses the metric's labels on collisions with target labels. - HonorLabels bool `json:"honorLabels,omitempty"` - // BasicAuth allow an endpoint to authenticate over basic authentication - // More info: https://prometheus.io/docs/operating/configuration/#endpoints - BasicAuth *BasicAuth `json:"basicAuth,omitempty"` - // MetricRelabelConfigs to apply to samples before ingestion. - MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` - // RelabelConfigs to apply to samples before ingestion. 
- // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config - RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` - // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. - ProxyURL *string `json:"proxyUrl,omitempty"` -} - -// BasicAuth allow an endpoint to authenticate over basic authentication -// More info: https://prometheus.io/docs/operating/configuration/#endpoints -// +k8s:openapi-gen=true -type BasicAuth struct { - // The secret that contains the username for authenticate - Username v1.SecretKeySelector `json:"username,omitempty"` - // The secret that contains the password for authenticate - Password v1.SecretKeySelector `json:"password,omitempty"` -} - -// TLSConfig specifies TLS configuration parameters. -// +k8s:openapi-gen=true -type TLSConfig struct { - // The CA cert to use for the targets. - CAFile string `json:"caFile,omitempty"` - // The client cert file for the targets. - CertFile string `json:"certFile,omitempty"` - // The client key file for the targets. - KeyFile string `json:"keyFile,omitempty"` - // Used to verify the hostname for the targets. - ServerName string `json:"serverName,omitempty"` - // Disable target certificate validation. - InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` -} - -// ServiceMonitorList is a list of ServiceMonitors. -// +k8s:openapi-gen=true -type ServiceMonitorList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata - metav1.ListMeta `json:"metadata,omitempty"` - // List of ServiceMonitors - Items []*ServiceMonitor `json:"items"` -} - -// PrometheusRuleList is a list of PrometheusRules. -// +k8s:openapi-gen=true -type PrometheusRuleList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - metav1.ListMeta `json:"metadata,omitempty"` - // List of Rules - Items []*PrometheusRule `json:"items"` -} - -// PrometheusRule defines alerting rules for a Prometheus instance -// +genclient -// +k8s:openapi-gen=true -type PrometheusRule struct { - metav1.TypeMeta `json:",inline"` - // Standard object’s metadata. More info: - // http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty"` - // Specification of desired alerting rule definitions for Prometheus. - Spec PrometheusRuleSpec `json:"spec"` -} - -// PrometheusRuleSpec contains specification parameters for a Rule. -// +k8s:openapi-gen=true -type PrometheusRuleSpec struct { - // Content of Prometheus rule file - Groups []RuleGroup `json:"groups,omitempty"` -} - -// RuleGroup and Rule are copied instead of vendored because the -// upstream Prometheus struct definitions don't have json struct tags. - -// RuleGroup is a list of sequentially evaluated recording and alerting rules. -// +k8s:openapi-gen=true -type RuleGroup struct { - Name string `json:"name"` - Interval string `json:"interval,omitempty"` - Rules []Rule `json:"rules"` -} - -// Rule describes an alerting or recording rule. 
-// +k8s:openapi-gen=true -type Rule struct { - Record string `json:"record,omitempty"` - Alert string `json:"alert,omitempty"` - Expr intstr.IntOrString `json:"expr"` - For string `json:"for,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -// Alertmanager describes an Alertmanager cluster. -// +genclient -// +k8s:openapi-gen=true -type Alertmanager struct { - metav1.TypeMeta `json:",inline"` - // Standard object’s metadata. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata - // +k8s:openapi-gen=false - metav1.ObjectMeta `json:"metadata,omitempty"` - // Specification of the desired behavior of the Alertmanager cluster. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status - Spec AlertmanagerSpec `json:"spec"` - // Most recent observed status of the Alertmanager cluster. Read-only. Not - // included when requesting from the apiserver, only from the Prometheus - // Operator API itself. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status - Status *AlertmanagerStatus `json:"status,omitempty"` -} - -// AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info: -// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status -// +k8s:openapi-gen=true -type AlertmanagerSpec struct { - // Standard object’s metadata. More info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata - // Metadata Labels and Annotations gets propagated to the prometheus pods. - PodMetadata *metav1.ObjectMeta `json:"podMetadata,omitempty"` - // Image if specified has precedence over baseImage, tag and sha - // combinations. Specifying the version is still necessary to ensure the - // Prometheus Operator knows what version of Alertmanager is being - // configured. - Image *string `json:"image,omitempty"` - // Version the cluster should be on. - Version string `json:"version,omitempty"` - // Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. - // Version is ignored if Tag is set. - Tag string `json:"tag,omitempty"` - // SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. - // Similar to a tag, but the SHA explicitly deploys an immutable container image. - // Version and Tag are ignored if SHA is set. - SHA string `json:"sha,omitempty"` - // Base image that is used to deploy pods, without tag. - BaseImage string `json:"baseImage,omitempty"` - // An optional list of references to secrets in the same namespace - // to use for pulling prometheus and alertmanager images from registries - // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod - ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` - // Secrets is a list of Secrets in the same namespace as the Alertmanager - // object, which shall be mounted into the Alertmanager Pods. - // The Secrets are mounted into /etc/alertmanager/secrets/. - Secrets []string `json:"secrets,omitempty"` - // ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager - // object, which shall be mounted into the Alertmanager Pods. - // The ConfigMaps are mounted into /etc/alertmanager/configmaps/. 
- ConfigMaps []string `json:"configMaps,omitempty"` - // Log level for Alertmanager to be configured with. - LogLevel string `json:"logLevel,omitempty"` - // Size is the expected size of the alertmanager cluster. The controller will - // eventually make the size of the running cluster equal to the expected - // size. - Replicas *int32 `json:"replicas,omitempty"` - // Time duration Alertmanager shall retain data for. Default is '120h', - // and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). - Retention string `json:"retention,omitempty"` - // Storage is the definition of how storage will be used by the Alertmanager - // instances. - Storage *StorageSpec `json:"storage,omitempty"` - // The external URL the Alertmanager instances will be available under. This is - // necessary to generate correct URLs. This is necessary if Alertmanager is not - // served from root of a DNS name. - ExternalURL string `json:"externalUrl,omitempty"` - // The route prefix Alertmanager registers HTTP handlers for. This is useful, - // if using ExternalURL and a proxy is rewriting HTTP routes of a request, - // and the actual ExternalURL is still true, but the server serves requests - // under a different route prefix. For example for use with `kubectl proxy`. - RoutePrefix string `json:"routePrefix,omitempty"` - // If set to true all actions on the underlaying managed objects are not - // goint to be performed, except for delete actions. - Paused bool `json:"paused,omitempty"` - // Define which Nodes the Pods are scheduled on. - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // Define resources requests and limits for single Pods. - Resources v1.ResourceRequirements `json:"resources,omitempty"` - // If specified, the pod's scheduling constraints. - Affinity *v1.Affinity `json:"affinity,omitempty"` - // If specified, the pod's tolerations. - Tolerations []v1.Toleration `json:"tolerations,omitempty"` - // SecurityContext holds pod-level security attributes and common container settings. - // This defaults to non root user with uid 1000 and gid 2000. - SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` - // ServiceAccountName is the name of the ServiceAccount to use to run the - // Prometheus Pods. - ServiceAccountName string `json:"serviceAccountName,omitempty"` - // ListenLocal makes the Alertmanager server listen on loopback, so that it - // does not bind against the Pod IP. Note this is only for the Alertmanager - // UI, not the gossip communication. - ListenLocal bool `json:"listenLocal,omitempty"` - // Containers allows injecting additional containers. This is meant to - // allow adding an authentication proxy to an Alertmanager pod. - Containers []v1.Container `json:"containers,omitempty"` - // Priority class assigned to the Pods - PriorityClassName string `json:"priorityClassName,omitempty"` - // AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. - AdditionalPeers []string `json:"additionalPeers,omitempty"` -} - -// AlertmanagerList is a list of Alertmanagers. 
-// +k8s:openapi-gen=true -type AlertmanagerList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata - metav1.ListMeta `json:"metadata,omitempty"` - // List of Alertmanagers - Items []Alertmanager `json:"items"` -} - -// AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not -// included when requesting from the apiserver, only from the Prometheus -// Operator API itself. More info: -// https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status -// +k8s:openapi-gen=true -type AlertmanagerStatus struct { - // Represents whether any actions on the underlaying managed objects are - // being performed. Only delete actions will be performed. - Paused bool `json:"paused"` - // Total number of non-terminated pods targeted by this Alertmanager - // cluster (their labels match the selector). - Replicas int32 `json:"replicas"` - // Total number of non-terminated pods targeted by this Alertmanager - // cluster that have the desired version spec. - UpdatedReplicas int32 `json:"updatedReplicas"` - // Total number of available pods (ready for at least minReadySeconds) - // targeted by this Alertmanager cluster. - AvailableReplicas int32 `json:"availableReplicas"` - // Total number of unavailable pods targeted by this Alertmanager cluster. - UnavailableReplicas int32 `json:"unavailableReplicas"` -} - -// NamespaceSelector is a selector for selecting either all namespaces or a -// list of namespaces. -// +k8s:openapi-gen=true -type NamespaceSelector struct { - // Boolean describing whether all namespaces are selected in contrast to a - // list restricting them. - Any bool `json:"any,omitempty"` - // List of namespace names. - MatchNames []string `json:"matchNames,omitempty"` - - // TODO(fabxc): this should embed metav1.LabelSelector eventually. - // Currently the selector is only used for namespaces which require more complex - // implementation to support label selections. -} - -// /--rules.*/ command-line arguments -// +k8s:openapi-gen=true -type Rules struct { - Alert RulesAlert `json:"alert,omitempty"` -} - -// /--rules.alert.*/ command-line arguments -// +k8s:openapi-gen=true -type RulesAlert struct { - // Max time to tolerate prometheus outage for restoring 'for' state of alert. - ForOutageTolerance string `json:"forOutageTolerance,omitempty"` - // Minimum duration between alert and restored 'for' state. - // This is maintained only for alerts with configured 'for' time greater than grace period. - ForGracePeriod string `json:"forGracePeriod,omitempty"` - // Minimum amount of time to wait before resending an alert to Alertmanager. - ResendDelay string `json:"resendDelay,omitempty"` -} - -// DeepCopyObject implements the runtime.Object interface. -func (l *Alertmanager) DeepCopyObject() runtime.Object { - return l.DeepCopy() -} - -// DeepCopyObject implements the runtime.Object interface. -func (l *AlertmanagerList) DeepCopyObject() runtime.Object { - return l.DeepCopy() -} - -// DeepCopyObject implements the runtime.Object interface. -func (l *Prometheus) DeepCopyObject() runtime.Object { - return l.DeepCopy() -} - -// DeepCopyObject implements the runtime.Object interface. -func (l *PrometheusList) DeepCopyObject() runtime.Object { - return l.DeepCopy() -} - -// DeepCopyObject implements the runtime.Object interface. 
-func (l *ServiceMonitor) DeepCopyObject() runtime.Object { - return l.DeepCopy() -} - -// DeepCopyObject implements the runtime.Object interface. -func (l *ServiceMonitorList) DeepCopyObject() runtime.Object { - return l.DeepCopy() -} - -// DeepCopyObject implements the runtime.Object interface. -func (f *PrometheusRule) DeepCopyObject() runtime.Object { - return f.DeepCopy() -} - -// DeepCopyObject implements the runtime.Object interface. -func (l *PrometheusRuleList) DeepCopyObject() runtime.Object { - return l.DeepCopy() -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go deleted file mode 100644 index a975b86..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,1193 +0,0 @@ -// +build !ignore_autogenerated - -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIServerConfig) DeepCopyInto(out *APIServerConfig) { - *out = *in - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(BasicAuth) - (*in).DeepCopyInto(*out) - } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(TLSConfig) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerConfig. -func (in *APIServerConfig) DeepCopy() *APIServerConfig { - if in == nil { - return nil - } - out := new(APIServerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlertingSpec) DeepCopyInto(out *AlertingSpec) { - *out = *in - if in.Alertmanagers != nil { - in, out := &in.Alertmanagers, &out.Alertmanagers - *out = make([]AlertmanagerEndpoints, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingSpec. -func (in *AlertingSpec) DeepCopy() *AlertingSpec { - if in == nil { - return nil - } - out := new(AlertingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Alertmanager) DeepCopyInto(out *Alertmanager) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(AlertmanagerStatus) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Alertmanager. -func (in *Alertmanager) DeepCopy() *Alertmanager { - if in == nil { - return nil - } - out := new(Alertmanager) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlertmanagerEndpoints) DeepCopyInto(out *AlertmanagerEndpoints) { - *out = *in - out.Port = in.Port - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(TLSConfig) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerEndpoints. -func (in *AlertmanagerEndpoints) DeepCopy() *AlertmanagerEndpoints { - if in == nil { - return nil - } - out := new(AlertmanagerEndpoints) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlertmanagerList) DeepCopyInto(out *AlertmanagerList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Alertmanager, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerList. -func (in *AlertmanagerList) DeepCopy() *AlertmanagerList { - if in == nil { - return nil - } - out := new(AlertmanagerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { - *out = *in - if in.PodMetadata != nil { - in, out := &in.PodMetadata, &out.PodMetadata - *out = new(metav1.ObjectMeta) - (*in).DeepCopyInto(*out) - } - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(string) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]corev1.LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ConfigMaps != nil { - in, out := &in.ConfigMaps, &out.ConfigMaps - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Storage != nil { - in, out := &in.Storage, &out.Storage - *out = new(StorageSpec) - (*in).DeepCopyInto(*out) - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(corev1.Affinity) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(corev1.PodSecurityContext) - (*in).DeepCopyInto(*out) - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]corev1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AdditionalPeers != nil { - in, out := &in.AdditionalPeers, &out.AdditionalPeers - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerSpec. -func (in *AlertmanagerSpec) DeepCopy() *AlertmanagerSpec { - if in == nil { - return nil - } - out := new(AlertmanagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AlertmanagerStatus) DeepCopyInto(out *AlertmanagerStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerStatus. -func (in *AlertmanagerStatus) DeepCopy() *AlertmanagerStatus { - if in == nil { - return nil - } - out := new(AlertmanagerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BasicAuth) DeepCopyInto(out *BasicAuth) { - *out = *in - in.Username.DeepCopyInto(&out.Username) - in.Password.DeepCopyInto(&out.Password) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuth. -func (in *BasicAuth) DeepCopy() *BasicAuth { - if in == nil { - return nil - } - out := new(BasicAuth) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CrdKind) DeepCopyInto(out *CrdKind) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrdKind. -func (in *CrdKind) DeepCopy() *CrdKind { - if in == nil { - return nil - } - out := new(CrdKind) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CrdKinds) DeepCopyInto(out *CrdKinds) { - *out = *in - out.Prometheus = in.Prometheus - out.Alertmanager = in.Alertmanager - out.ServiceMonitor = in.ServiceMonitor - out.PrometheusRule = in.PrometheusRule - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrdKinds. -func (in *CrdKinds) DeepCopy() *CrdKinds { - if in == nil { - return nil - } - out := new(CrdKinds) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Endpoint) DeepCopyInto(out *Endpoint) { - *out = *in - if in.TargetPort != nil { - in, out := &in.TargetPort, &out.TargetPort - *out = new(intstr.IntOrString) - **out = **in - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make(map[string][]string, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]string, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(TLSConfig) - **out = **in - } - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(BasicAuth) - (*in).DeepCopyInto(*out) - } - if in.MetricRelabelConfigs != nil { - in, out := &in.MetricRelabelConfigs, &out.MetricRelabelConfigs - *out = make([]*RelabelConfig, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } - } - } - if in.RelabelConfigs != nil { - in, out := &in.RelabelConfigs, &out.RelabelConfigs - *out = make([]*RelabelConfig, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(RelabelConfig) - (*in).DeepCopyInto(*out) - } - } - } - if in.ProxyURL != nil { - in, out := &in.ProxyURL, &out.ProxyURL - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. -func (in *Endpoint) DeepCopy() *Endpoint { - if in == nil { - return nil - } - out := new(Endpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamespaceSelector) DeepCopyInto(out *NamespaceSelector) { - *out = *in - if in.MatchNames != nil { - in, out := &in.MatchNames, &out.MatchNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSelector. -func (in *NamespaceSelector) DeepCopy() *NamespaceSelector { - if in == nil { - return nil - } - out := new(NamespaceSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Prometheus) DeepCopyInto(out *Prometheus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(PrometheusStatus) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Prometheus. -func (in *Prometheus) DeepCopy() *Prometheus { - if in == nil { - return nil - } - out := new(Prometheus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrometheusList) DeepCopyInto(out *PrometheusList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*Prometheus, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(Prometheus) - (*in).DeepCopyInto(*out) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusList. -func (in *PrometheusList) DeepCopy() *PrometheusList { - if in == nil { - return nil - } - out := new(PrometheusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrometheusRule) DeepCopyInto(out *PrometheusRule) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRule. -func (in *PrometheusRule) DeepCopy() *PrometheusRule { - if in == nil { - return nil - } - out := new(PrometheusRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrometheusRuleList) DeepCopyInto(out *PrometheusRuleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*PrometheusRule, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PrometheusRule) - (*in).DeepCopyInto(*out) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleList. -func (in *PrometheusRuleList) DeepCopy() *PrometheusRuleList { - if in == nil { - return nil - } - out := new(PrometheusRuleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrometheusRuleSpec) DeepCopyInto(out *PrometheusRuleSpec) { - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]RuleGroup, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleSpec. -func (in *PrometheusRuleSpec) DeepCopy() *PrometheusRuleSpec { - if in == nil { - return nil - } - out := new(PrometheusRuleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { - *out = *in - if in.PodMetadata != nil { - in, out := &in.PodMetadata, &out.PodMetadata - *out = new(metav1.ObjectMeta) - (*in).DeepCopyInto(*out) - } - if in.ServiceMonitorSelector != nil { - in, out := &in.ServiceMonitorSelector, &out.ServiceMonitorSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ServiceMonitorNamespaceSelector != nil { - in, out := &in.ServiceMonitorNamespaceSelector, &out.ServiceMonitorNamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(string) - **out = **in - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]corev1.LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - out.Rules = in.Rules - if in.ExternalLabels != nil { - in, out := &in.ExternalLabels, &out.ExternalLabels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Query != nil { - in, out := &in.Query, &out.Query - *out = new(QuerySpec) - (*in).DeepCopyInto(*out) - } - if in.Storage != nil { - in, out := &in.Storage, &out.Storage - *out = new(StorageSpec) - (*in).DeepCopyInto(*out) - } - if in.RuleSelector != nil { - in, out := &in.RuleSelector, &out.RuleSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.RuleNamespaceSelector != nil { - in, out := &in.RuleNamespaceSelector, &out.RuleNamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.Alerting != nil { - in, out := &in.Alerting, &out.Alerting - *out = new(AlertingSpec) - (*in).DeepCopyInto(*out) - } - in.Resources.DeepCopyInto(&out.Resources) - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ConfigMaps != nil { - in, out := &in.ConfigMaps, &out.ConfigMaps - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(corev1.Affinity) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]corev1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RemoteWrite != nil { - in, out := &in.RemoteWrite, &out.RemoteWrite - *out = make([]RemoteWriteSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RemoteRead != nil { - in, out := &in.RemoteRead, &out.RemoteRead - *out = make([]RemoteReadSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(corev1.PodSecurityContext) - (*in).DeepCopyInto(*out) - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]corev1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AdditionalScrapeConfigs != nil { - in, out := &in.AdditionalScrapeConfigs, &out.AdditionalScrapeConfigs - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if 
in.AdditionalAlertRelabelConfigs != nil { - in, out := &in.AdditionalAlertRelabelConfigs, &out.AdditionalAlertRelabelConfigs - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.AdditionalAlertManagerConfigs != nil { - in, out := &in.AdditionalAlertManagerConfigs, &out.AdditionalAlertManagerConfigs - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.APIServerConfig != nil { - in, out := &in.APIServerConfig, &out.APIServerConfig - *out = new(APIServerConfig) - (*in).DeepCopyInto(*out) - } - if in.Thanos != nil { - in, out := &in.Thanos, &out.Thanos - *out = new(ThanosSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. -func (in *PrometheusSpec) DeepCopy() *PrometheusSpec { - if in == nil { - return nil - } - out := new(PrometheusSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PrometheusStatus) DeepCopyInto(out *PrometheusStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusStatus. -func (in *PrometheusStatus) DeepCopy() *PrometheusStatus { - if in == nil { - return nil - } - out := new(PrometheusStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QuerySpec) DeepCopyInto(out *QuerySpec) { - *out = *in - if in.LookbackDelta != nil { - in, out := &in.LookbackDelta, &out.LookbackDelta - *out = new(string) - **out = **in - } - if in.MaxConcurrency != nil { - in, out := &in.MaxConcurrency, &out.MaxConcurrency - *out = new(int32) - **out = **in - } - if in.Timeout != nil { - in, out := &in.Timeout, &out.Timeout - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuerySpec. -func (in *QuerySpec) DeepCopy() *QuerySpec { - if in == nil { - return nil - } - out := new(QuerySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QueueConfig) DeepCopyInto(out *QueueConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueConfig. -func (in *QueueConfig) DeepCopy() *QueueConfig { - if in == nil { - return nil - } - out := new(QueueConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) { - *out = *in - if in.SourceLabels != nil { - in, out := &in.SourceLabels, &out.SourceLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelConfig. -func (in *RelabelConfig) DeepCopy() *RelabelConfig { - if in == nil { - return nil - } - out := new(RelabelConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RemoteReadSpec) DeepCopyInto(out *RemoteReadSpec) { - *out = *in - if in.RequiredMatchers != nil { - in, out := &in.RequiredMatchers, &out.RequiredMatchers - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(BasicAuth) - (*in).DeepCopyInto(*out) - } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(TLSConfig) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteReadSpec. -func (in *RemoteReadSpec) DeepCopy() *RemoteReadSpec { - if in == nil { - return nil - } - out := new(RemoteReadSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RemoteWriteSpec) DeepCopyInto(out *RemoteWriteSpec) { - *out = *in - if in.WriteRelabelConfigs != nil { - in, out := &in.WriteRelabelConfigs, &out.WriteRelabelConfigs - *out = make([]RelabelConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.BasicAuth != nil { - in, out := &in.BasicAuth, &out.BasicAuth - *out = new(BasicAuth) - (*in).DeepCopyInto(*out) - } - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(TLSConfig) - **out = **in - } - if in.QueueConfig != nil { - in, out := &in.QueueConfig, &out.QueueConfig - *out = new(QueueConfig) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteWriteSpec. -func (in *RemoteWriteSpec) DeepCopy() *RemoteWriteSpec { - if in == nil { - return nil - } - out := new(RemoteWriteSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Rule) DeepCopyInto(out *Rule) { - *out = *in - out.Expr = in.Expr - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. -func (in *Rule) DeepCopy() *Rule { - if in == nil { - return nil - } - out := new(Rule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuleGroup) DeepCopyInto(out *RuleGroup) { - *out = *in - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]Rule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroup. -func (in *RuleGroup) DeepCopy() *RuleGroup { - if in == nil { - return nil - } - out := new(RuleGroup) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Rules) DeepCopyInto(out *Rules) { - *out = *in - out.Alert = in.Alert - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rules. 
-func (in *Rules) DeepCopy() *Rules { - if in == nil { - return nil - } - out := new(Rules) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RulesAlert) DeepCopyInto(out *RulesAlert) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RulesAlert. -func (in *RulesAlert) DeepCopy() *RulesAlert { - if in == nil { - return nil - } - out := new(RulesAlert) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceMonitor) DeepCopyInto(out *ServiceMonitor) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitor. -func (in *ServiceMonitor) DeepCopy() *ServiceMonitor { - if in == nil { - return nil - } - out := new(ServiceMonitor) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceMonitorList) DeepCopyInto(out *ServiceMonitorList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*ServiceMonitor, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(ServiceMonitor) - (*in).DeepCopyInto(*out) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorList. -func (in *ServiceMonitorList) DeepCopy() *ServiceMonitorList { - if in == nil { - return nil - } - out := new(ServiceMonitorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceMonitorSpec) DeepCopyInto(out *ServiceMonitorSpec) { - *out = *in - if in.TargetLabels != nil { - in, out := &in.TargetLabels, &out.TargetLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.PodTargetLabels != nil { - in, out := &in.PodTargetLabels, &out.PodTargetLabels - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Endpoints != nil { - in, out := &in.Endpoints, &out.Endpoints - *out = make([]Endpoint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Selector.DeepCopyInto(&out.Selector) - in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMonitorSpec. -func (in *ServiceMonitorSpec) DeepCopy() *ServiceMonitorSpec { - if in == nil { - return nil - } - out := new(ServiceMonitorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { - *out = *in - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - *out = new(corev1.EmptyDirVolumeSource) - (*in).DeepCopyInto(*out) - } - in.VolumeClaimTemplate.DeepCopyInto(&out.VolumeClaimTemplate) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. 
-func (in *StorageSpec) DeepCopy() *StorageSpec { - if in == nil { - return nil - } - out := new(StorageSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. -func (in *TLSConfig) DeepCopy() *TLSConfig { - if in == nil { - return nil - } - out := new(TLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThanosGCSSpec) DeepCopyInto(out *ThanosGCSSpec) { - *out = *in - if in.Bucket != nil { - in, out := &in.Bucket, &out.Bucket - *out = new(string) - **out = **in - } - if in.SecretKey != nil { - in, out := &in.SecretKey, &out.SecretKey - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosGCSSpec. -func (in *ThanosGCSSpec) DeepCopy() *ThanosGCSSpec { - if in == nil { - return nil - } - out := new(ThanosGCSSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThanosS3Spec) DeepCopyInto(out *ThanosS3Spec) { - *out = *in - if in.Bucket != nil { - in, out := &in.Bucket, &out.Bucket - *out = new(string) - **out = **in - } - if in.Endpoint != nil { - in, out := &in.Endpoint, &out.Endpoint - *out = new(string) - **out = **in - } - if in.AccessKey != nil { - in, out := &in.AccessKey, &out.AccessKey - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.SecretKey != nil { - in, out := &in.SecretKey, &out.SecretKey - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.Insecure != nil { - in, out := &in.Insecure, &out.Insecure - *out = new(bool) - **out = **in - } - if in.SignatureVersion2 != nil { - in, out := &in.SignatureVersion2, &out.SignatureVersion2 - *out = new(bool) - **out = **in - } - if in.EncryptSSE != nil { - in, out := &in.EncryptSSE, &out.EncryptSSE - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosS3Spec. -func (in *ThanosS3Spec) DeepCopy() *ThanosS3Spec { - if in == nil { - return nil - } - out := new(ThanosS3Spec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ThanosSpec) DeepCopyInto(out *ThanosSpec) { - *out = *in - if in.Peers != nil { - in, out := &in.Peers, &out.Peers - *out = new(string) - **out = **in - } - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(string) - **out = **in - } - if in.Version != nil { - in, out := &in.Version, &out.Version - *out = new(string) - **out = **in - } - if in.Tag != nil { - in, out := &in.Tag, &out.Tag - *out = new(string) - **out = **in - } - if in.SHA != nil { - in, out := &in.SHA, &out.SHA - *out = new(string) - **out = **in - } - if in.BaseImage != nil { - in, out := &in.BaseImage, &out.BaseImage - *out = new(string) - **out = **in - } - in.Resources.DeepCopyInto(&out.Resources) - if in.GCS != nil { - in, out := &in.GCS, &out.GCS - *out = new(ThanosGCSSpec) - (*in).DeepCopyInto(*out) - } - if in.S3 != nil { - in, out := &in.S3, &out.S3 - *out = new(ThanosS3Spec) - (*in).DeepCopyInto(*out) - } - if in.ObjectStorageConfig != nil { - in, out := &in.ObjectStorageConfig, &out.ObjectStorageConfig - *out = new(corev1.SecretKeySelector) - (*in).DeepCopyInto(*out) - } - if in.GrpcAdvertiseAddress != nil { - in, out := &in.GrpcAdvertiseAddress, &out.GrpcAdvertiseAddress - *out = new(string) - **out = **in - } - if in.ClusterAdvertiseAddress != nil { - in, out := &in.ClusterAdvertiseAddress, &out.ClusterAdvertiseAddress - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThanosSpec. -func (in *ThanosSpec) DeepCopy() *ThanosSpec { - if in == nil { - return nil - } - out := new(ThanosSpec) - in.DeepCopyInto(out) - return out -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/scheme/doc.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/scheme/doc.go deleted file mode 100644 index ff89972..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/scheme/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/scheme/register.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/scheme/register.go deleted file mode 100644 index 95edfc4..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/scheme/register.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - monitoringv1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go deleted file mode 100644 index 2b2474a..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/alertmanager.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" - scheme "github.com/coreos/prometheus-operator/pkg/client/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// AlertmanagersGetter has a method to return a AlertmanagerInterface. -// A group's client should implement this interface. 
-type AlertmanagersGetter interface { - Alertmanagers(namespace string) AlertmanagerInterface -} - -// AlertmanagerInterface has methods to work with Alertmanager resources. -type AlertmanagerInterface interface { - Create(*v1.Alertmanager) (*v1.Alertmanager, error) - Update(*v1.Alertmanager) (*v1.Alertmanager, error) - UpdateStatus(*v1.Alertmanager) (*v1.Alertmanager, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Alertmanager, error) - List(opts metav1.ListOptions) (*v1.AlertmanagerList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Alertmanager, err error) - AlertmanagerExpansion -} - -// alertmanagers implements AlertmanagerInterface -type alertmanagers struct { - client rest.Interface - ns string -} - -// newAlertmanagers returns a Alertmanagers -func newAlertmanagers(c *MonitoringV1Client, namespace string) *alertmanagers { - return &alertmanagers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the alertmanager, and returns the corresponding alertmanager object, and an error if there is any. -func (c *alertmanagers) Get(name string, options metav1.GetOptions) (result *v1.Alertmanager, err error) { - result = &v1.Alertmanager{} - err = c.client.Get(). - Namespace(c.ns). - Resource("alertmanagers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Alertmanagers that match those selectors. -func (c *alertmanagers) List(opts metav1.ListOptions) (result *v1.AlertmanagerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.AlertmanagerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("alertmanagers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested alertmanagers. -func (c *alertmanagers) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("alertmanagers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a alertmanager and creates it. Returns the server's representation of the alertmanager, and an error, if there is any. -func (c *alertmanagers) Create(alertmanager *v1.Alertmanager) (result *v1.Alertmanager, err error) { - result = &v1.Alertmanager{} - err = c.client.Post(). - Namespace(c.ns). - Resource("alertmanagers"). - Body(alertmanager). - Do(). - Into(result) - return -} - -// Update takes the representation of a alertmanager and updates it. Returns the server's representation of the alertmanager, and an error, if there is any. -func (c *alertmanagers) Update(alertmanager *v1.Alertmanager) (result *v1.Alertmanager, err error) { - result = &v1.Alertmanager{} - err = c.client.Put(). - Namespace(c.ns). - Resource("alertmanagers"). - Name(alertmanager.Name). - Body(alertmanager). - Do(). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *alertmanagers) UpdateStatus(alertmanager *v1.Alertmanager) (result *v1.Alertmanager, err error) { - result = &v1.Alertmanager{} - err = c.client.Put(). - Namespace(c.ns). - Resource("alertmanagers"). - Name(alertmanager.Name). - SubResource("status"). - Body(alertmanager). - Do(). - Into(result) - return -} - -// Delete takes name of the alertmanager and deletes it. Returns an error if one occurs. -func (c *alertmanagers) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("alertmanagers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *alertmanagers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("alertmanagers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched alertmanager. -func (c *alertmanagers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Alertmanager, err error) { - result = &v1.Alertmanager{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("alertmanagers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go deleted file mode 100644 index 7938dcd..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package v1 diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go deleted file mode 100644 index 88cf4ce..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/generated_expansion.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type AlertmanagerExpansion interface{} - -type PrometheusExpansion interface{} - -type PrometheusRuleExpansion interface{} - -type ServiceMonitorExpansion interface{} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go deleted file mode 100644 index 6e5bac9..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/monitoring_client.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" - "github.com/coreos/prometheus-operator/pkg/client/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" -) - -type MonitoringV1Interface interface { - RESTClient() rest.Interface - AlertmanagersGetter - PrometheusesGetter - PrometheusRulesGetter - ServiceMonitorsGetter -} - -// MonitoringV1Client is used to interact with features provided by the monitoring.coreos.com group. 
-type MonitoringV1Client struct { - restClient rest.Interface -} - -func (c *MonitoringV1Client) Alertmanagers(namespace string) AlertmanagerInterface { - return newAlertmanagers(c, namespace) -} - -func (c *MonitoringV1Client) Prometheuses(namespace string) PrometheusInterface { - return newPrometheuses(c, namespace) -} - -func (c *MonitoringV1Client) PrometheusRules(namespace string) PrometheusRuleInterface { - return newPrometheusRules(c, namespace) -} - -func (c *MonitoringV1Client) ServiceMonitors(namespace string) ServiceMonitorInterface { - return newServiceMonitors(c, namespace) -} - -// NewForConfig creates a new MonitoringV1Client for the given config. -func NewForConfig(c *rest.Config) (*MonitoringV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &MonitoringV1Client{client}, nil -} - -// NewForConfigOrDie creates a new MonitoringV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *MonitoringV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new MonitoringV1Client for the given RESTClient. -func New(c rest.Interface) *MonitoringV1Client { - return &MonitoringV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *MonitoringV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go deleted file mode 100644 index d06f348..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheus.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" - scheme "github.com/coreos/prometheus-operator/pkg/client/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// PrometheusesGetter has a method to return a PrometheusInterface. -// A group's client should implement this interface. 
-type PrometheusesGetter interface { - Prometheuses(namespace string) PrometheusInterface -} - -// PrometheusInterface has methods to work with Prometheus resources. -type PrometheusInterface interface { - Create(*v1.Prometheus) (*v1.Prometheus, error) - Update(*v1.Prometheus) (*v1.Prometheus, error) - UpdateStatus(*v1.Prometheus) (*v1.Prometheus, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Prometheus, error) - List(opts metav1.ListOptions) (*v1.PrometheusList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Prometheus, err error) - PrometheusExpansion -} - -// prometheuses implements PrometheusInterface -type prometheuses struct { - client rest.Interface - ns string -} - -// newPrometheuses returns a Prometheuses -func newPrometheuses(c *MonitoringV1Client, namespace string) *prometheuses { - return &prometheuses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the prometheus, and returns the corresponding prometheus object, and an error if there is any. -func (c *prometheuses) Get(name string, options metav1.GetOptions) (result *v1.Prometheus, err error) { - result = &v1.Prometheus{} - err = c.client.Get(). - Namespace(c.ns). - Resource("prometheuses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Prometheuses that match those selectors. -func (c *prometheuses) List(opts metav1.ListOptions) (result *v1.PrometheusList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PrometheusList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("prometheuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested prometheuses. -func (c *prometheuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("prometheuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a prometheus and creates it. Returns the server's representation of the prometheus, and an error, if there is any. -func (c *prometheuses) Create(prometheus *v1.Prometheus) (result *v1.Prometheus, err error) { - result = &v1.Prometheus{} - err = c.client.Post(). - Namespace(c.ns). - Resource("prometheuses"). - Body(prometheus). - Do(). - Into(result) - return -} - -// Update takes the representation of a prometheus and updates it. Returns the server's representation of the prometheus, and an error, if there is any. -func (c *prometheuses) Update(prometheus *v1.Prometheus) (result *v1.Prometheus, err error) { - result = &v1.Prometheus{} - err = c.client.Put(). - Namespace(c.ns). - Resource("prometheuses"). - Name(prometheus.Name). - Body(prometheus). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *prometheuses) UpdateStatus(prometheus *v1.Prometheus) (result *v1.Prometheus, err error) { - result = &v1.Prometheus{} - err = c.client.Put(). - Namespace(c.ns). - Resource("prometheuses"). - Name(prometheus.Name). - SubResource("status"). - Body(prometheus). - Do(). - Into(result) - return -} - -// Delete takes name of the prometheus and deletes it. Returns an error if one occurs. -func (c *prometheuses) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("prometheuses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *prometheuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("prometheuses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched prometheus. -func (c *prometheuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Prometheus, err error) { - result = &v1.Prometheus{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("prometheuses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go deleted file mode 100644 index 8bce290..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/prometheusrule.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" - scheme "github.com/coreos/prometheus-operator/pkg/client/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// PrometheusRulesGetter has a method to return a PrometheusRuleInterface. -// A group's client should implement this interface. -type PrometheusRulesGetter interface { - PrometheusRules(namespace string) PrometheusRuleInterface -} - -// PrometheusRuleInterface has methods to work with PrometheusRule resources. 
-type PrometheusRuleInterface interface { - Create(*v1.PrometheusRule) (*v1.PrometheusRule, error) - Update(*v1.PrometheusRule) (*v1.PrometheusRule, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PrometheusRule, error) - List(opts metav1.ListOptions) (*v1.PrometheusRuleList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PrometheusRule, err error) - PrometheusRuleExpansion -} - -// prometheusRules implements PrometheusRuleInterface -type prometheusRules struct { - client rest.Interface - ns string -} - -// newPrometheusRules returns a PrometheusRules -func newPrometheusRules(c *MonitoringV1Client, namespace string) *prometheusRules { - return &prometheusRules{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the prometheusRule, and returns the corresponding prometheusRule object, and an error if there is any. -func (c *prometheusRules) Get(name string, options metav1.GetOptions) (result *v1.PrometheusRule, err error) { - result = &v1.PrometheusRule{} - err = c.client.Get(). - Namespace(c.ns). - Resource("prometheusrules"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PrometheusRules that match those selectors. -func (c *prometheusRules) List(opts metav1.ListOptions) (result *v1.PrometheusRuleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PrometheusRuleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("prometheusrules"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested prometheusRules. -func (c *prometheusRules) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("prometheusrules"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a prometheusRule and creates it. Returns the server's representation of the prometheusRule, and an error, if there is any. -func (c *prometheusRules) Create(prometheusRule *v1.PrometheusRule) (result *v1.PrometheusRule, err error) { - result = &v1.PrometheusRule{} - err = c.client.Post(). - Namespace(c.ns). - Resource("prometheusrules"). - Body(prometheusRule). - Do(). - Into(result) - return -} - -// Update takes the representation of a prometheusRule and updates it. Returns the server's representation of the prometheusRule, and an error, if there is any. -func (c *prometheusRules) Update(prometheusRule *v1.PrometheusRule) (result *v1.PrometheusRule, err error) { - result = &v1.PrometheusRule{} - err = c.client.Put(). - Namespace(c.ns). - Resource("prometheusrules"). - Name(prometheusRule.Name). - Body(prometheusRule). - Do(). - Into(result) - return -} - -// Delete takes name of the prometheusRule and deletes it. Returns an error if one occurs. 
-func (c *prometheusRules) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("prometheusrules"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *prometheusRules) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("prometheusrules"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched prometheusRule. -func (c *prometheusRules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PrometheusRule, err error) { - result = &v1.PrometheusRule{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("prometheusrules"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go b/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go deleted file mode 100644 index d058774..0000000 --- a/cmd/bpa-operator/vendor/github.com/coreos/prometheus-operator/pkg/client/versioned/typed/monitoring/v1/servicemonitor.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2018 The prometheus-operator Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" - scheme "github.com/coreos/prometheus-operator/pkg/client/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ServiceMonitorsGetter has a method to return a ServiceMonitorInterface. -// A group's client should implement this interface. -type ServiceMonitorsGetter interface { - ServiceMonitors(namespace string) ServiceMonitorInterface -} - -// ServiceMonitorInterface has methods to work with ServiceMonitor resources. 
-type ServiceMonitorInterface interface { - Create(*v1.ServiceMonitor) (*v1.ServiceMonitor, error) - Update(*v1.ServiceMonitor) (*v1.ServiceMonitor, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ServiceMonitor, error) - List(opts metav1.ListOptions) (*v1.ServiceMonitorList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceMonitor, err error) - ServiceMonitorExpansion -} - -// serviceMonitors implements ServiceMonitorInterface -type serviceMonitors struct { - client rest.Interface - ns string -} - -// newServiceMonitors returns a ServiceMonitors -func newServiceMonitors(c *MonitoringV1Client, namespace string) *serviceMonitors { - return &serviceMonitors{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the serviceMonitor, and returns the corresponding serviceMonitor object, and an error if there is any. -func (c *serviceMonitors) Get(name string, options metav1.GetOptions) (result *v1.ServiceMonitor, err error) { - result = &v1.ServiceMonitor{} - err = c.client.Get(). - Namespace(c.ns). - Resource("servicemonitors"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceMonitors that match those selectors. -func (c *serviceMonitors) List(opts metav1.ListOptions) (result *v1.ServiceMonitorList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.ServiceMonitorList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("servicemonitors"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceMonitors. -func (c *serviceMonitors) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("servicemonitors"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a serviceMonitor and creates it. Returns the server's representation of the serviceMonitor, and an error, if there is any. -func (c *serviceMonitors) Create(serviceMonitor *v1.ServiceMonitor) (result *v1.ServiceMonitor, err error) { - result = &v1.ServiceMonitor{} - err = c.client.Post(). - Namespace(c.ns). - Resource("servicemonitors"). - Body(serviceMonitor). - Do(). - Into(result) - return -} - -// Update takes the representation of a serviceMonitor and updates it. Returns the server's representation of the serviceMonitor, and an error, if there is any. -func (c *serviceMonitors) Update(serviceMonitor *v1.ServiceMonitor) (result *v1.ServiceMonitor, err error) { - result = &v1.ServiceMonitor{} - err = c.client.Put(). - Namespace(c.ns). - Resource("servicemonitors"). - Name(serviceMonitor.Name). - Body(serviceMonitor). - Do(). - Into(result) - return -} - -// Delete takes name of the serviceMonitor and deletes it. Returns an error if one occurs. 
-func (c *serviceMonitors) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("servicemonitors"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceMonitors) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("servicemonitors"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched serviceMonitor. -func (c *serviceMonitors) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceMonitor, err error) { - result = &v1.ServiceMonitor{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("servicemonitors"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/LICENSE b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/bypass.go b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 7929947..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. 
The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. 
- vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/common.go b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce9..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("<nil>") - maxNewlineBytes = []byte("<max depth reached>\n") - maxShortBytes = []byte("<max>") - circularBytes = []byte("<shown>") - circularShortBytes = []byte("<shown>") - invalidAngleBytes = []byte("<invalid>") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? 
- switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. 
-func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/config.go b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. 
- DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. 
- -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. 
-// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/doc.go b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) 
- -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. 
- - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. - -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/dump.go b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89f..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. 
- if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. 
- if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. 
See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/format.go b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. -func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. - if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. 
We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. 
- if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. -*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/spew.go b/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/cmd/bpa-operator/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/.gitignore b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/.gitignore deleted file mode 100644 index 80bed65..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.DS_Store -bin - - diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/.travis.yml deleted file mode 100644 index 1027f56..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -script: - - go vet ./... - - go test -v ./... - -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - tip diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/LICENSE b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/LICENSE deleted file mode 100644 index df83a9c..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -Copyright (c) 2012 Dave Grijalva - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md deleted file mode 100644 index 7fc1f79..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md +++ /dev/null @@ -1,97 +0,0 @@ -## Migration Guide from v2 -> v3 - -Version 3 adds several new, frequently requested features. 
To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. - -### `Token.Claims` is now an interface type - -The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. - -`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. - -The old example for parsing a token looked like this.. - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is now directly mapped to... - -```go - if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. - -```go - type MyCustomClaims struct { - User string - *StandardClaims - } - - if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { - claims := token.Claims.(*MyCustomClaims) - fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) - } -``` - -### `ParseFromRequest` has been moved - -To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. - -`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. - -This simple parsing example: - -```go - if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { - fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) - } -``` - -is directly mapped to: - -```go - if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { - claims := token.Claims.(jwt.MapClaims) - fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) - } -``` - -There are several concrete `Extractor` types provided for your convenience: - -* `HeaderExtractor` will search a list of headers until one contains content. -* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. -* `MultiExtractor` will try a list of `Extractors` in order until one returns content. -* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. -* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument -* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. 
A simple example is stripping the `Bearer ` text from a header - - -### RSA signing methods no longer accept `[]byte` keys - -Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. - -To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. - -```go - func keyLookupFunc(*Token) (interface{}, error) { - // Don't forget to validate the alg is what you expect: - if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { - return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) - } - - // Look up key - key, err := lookupPublicKey(token.Header["kid"]) - if err != nil { - return nil, err - } - - // Unpack key from PEM encoded PKCS8 - return jwt.ParseRSAPublicKeyFromPEM(key) - } -``` diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/README.md b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/README.md deleted file mode 100644 index d358d88..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# jwt-go - -[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) -[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) - -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) - -**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. - -**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. - -**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. - -## What the heck is a JWT? - -JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. - -In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. 
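That three-part shape is easy to poke at with nothing but the Go standard library. The following is a minimal illustrative sketch, not taken from the files above; the header and claim JSON are made-up placeholders and the signature segment is deliberately fake:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Build a structurally complete token by hand: two base64url-encoded JSON
	// segments plus a (fake) signature segment, joined by dots.
	header := base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"HS256","typ":"JWT"}`))
	claims := base64.RawURLEncoding.EncodeToString([]byte(`{"user":"example","exp":1700000000}`))
	token := header + "." + claims + ".not-a-real-signature"

	// Splitting on '.' recovers the three parts described above.
	parts := strings.Split(token, ".")
	for i, name := range []string{"header", "claims"} {
		decoded, _ := base64.RawURLEncoding.DecodeString(parts[i])
		fmt.Printf("%s: %s\n", name, decoded)
	}
	fmt.Println("signature segment:", parts[2])
}
```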
- -The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. - -The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. - -## What's in the box? - -This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. - -## Examples - -See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: - -* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) -* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) -* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) - -## Extensions - -This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. - -Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go - -## Compliance - -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: - -* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. - -## Project Status & Versioning - -This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). - -This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). - -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. - -**BREAKING CHANGES:*** -* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. - -## Usage Tips - -### Signing vs Encryption - -A token is simply a JSON object that is signed by its author. 
this tells you exactly two things about the data: - -* The author of the token was in the possession of the signing secret -* The data has not been modified since it was signed - -It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. - -### Choosing a Signing Method - -There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. - -Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. - -Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. - -### Signing Methods and Key Types - -Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: - -* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation -* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation -* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation - -### JWT and OAuth - -It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. - -Without going too far down the rabbit hole, here's a description of the interaction of these technologies: - -* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. -* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. -* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - -## More - -Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). 
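The notes above on key types and on checking `alg` combine into one small round trip: sign with a `[]byte` secret using HS256, then verify the signing method inside the `Keyfunc` before returning that same secret. A minimal sketch against the v3 API documented in these files; the secret and claim values are placeholders:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// HMAC methods take a []byte secret for both signing and validation.
	secret := []byte("example-secret")

	// Build and sign a token with HS256 and a couple of map claims.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"user": "example-user",
		"exp":  time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(secret)
	if err != nil {
		fmt.Println("sign error:", err)
		return
	}

	// Parse it back, confirming the alg before handing out the key,
	// as the security notice above recommends.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return secret, nil
	})
	if err != nil || !parsed.Valid {
		fmt.Println("parse error:", err)
		return
	}
	claims := parsed.Claims.(jwt.MapClaims)
	fmt.Println("user claim:", claims["user"])
}
```

For RSA or ECDSA the only change is the key type handed back from the `Keyfunc` (`*rsa.PublicKey` or `*ecdsa.PublicKey`), per the list of key types above.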
- -The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md deleted file mode 100644 index 6370298..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ /dev/null @@ -1,118 +0,0 @@ -## `jwt-go` Version History - -#### 3.2.0 - -* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation -* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate -* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. -* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. - -#### 3.1.0 - -* Improvements to `jwt` command line tool -* Added `SkipClaimsValidation` option to `Parser` -* Documentation updates - -#### 3.0.0 - -* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code - * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. - * `ParseFromRequest` has been moved to `request` subpackage and usage has changed - * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. -* Other Additions and Changes - * Added `Claims` interface type to allow users to decode the claims into a custom type - * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. - * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage - * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` - * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. - * Added several new, more specific, validation errors to error type bitmask - * Moved examples from README to executable example files - * Signing method registry is now thread safe - * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) - -#### 2.7.0 - -This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. - -* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying -* Error text for expired tokens includes how long it's been expired -* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` -* Documentation updates - -#### 2.6.0 - -* Exposed inner error within ValidationError -* Fixed validation errors when using UseJSONNumber flag -* Added several unit tests - -#### 2.5.0 - -* Added support for signing method none. You shouldn't use this. 
The API tries to make this clear. -* Updated/fixed some documentation -* Added more helpful error message when trying to parse tokens that begin with `BEARER ` - -#### 2.4.0 - -* Added new type, Parser, to allow for configuration of various parsing parameters - * You can now specify a list of valid signing methods. Anything outside this set will be rejected. - * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON -* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) -* Fixed some bugs with ECDSA parsing - -#### 2.3.0 - -* Added support for ECDSA signing methods -* Added support for RSA PSS signing methods (requires go v1.4) - -#### 2.2.0 - -* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. - -#### 2.1.0 - -Backwards compatible API change that was missed in 2.0.0. - -* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` - -#### 2.0.0 - -There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. - -The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. - -It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. - -* **Compatibility Breaking Changes** - * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` - * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` - * `KeyFunc` now returns `interface{}` instead of `[]byte` - * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key - * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key -* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodHS256` - * Added public package global `SigningMethodHS384` - * Added public package global `SigningMethodHS512` -* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. - * Added public package global `SigningMethodRS256` - * Added public package global `SigningMethodRS384` - * Added public package global `SigningMethodRS512` -* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. -* Refactored the RSA implementation to be easier to read -* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` - -#### 1.0.2 - -* Fixed bug in parsing public keys from certificates -* Added more tests around the parsing of keys for RS256 -* Code refactoring in RS256 implementation. 
No functional changes - -#### 1.0.1 - -* Fixed panic if RS256 signing method was passed an invalid key - -#### 1.0.0 - -* First versioned release -* API stabilized -* Supports creating, signing, parsing, and validating JWT tokens -* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/claims.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/claims.go deleted file mode 100644 index f0228f0..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/claims.go +++ /dev/null @@ -1,134 +0,0 @@ -package jwt - -import ( - "crypto/subtle" - "fmt" - "time" -) - -// For a type to be a Claims object, it must just have a Valid method that determines -// if the token is invalid for any supported reason -type Claims interface { - Valid() error -} - -// Structured version of Claims Section, as referenced at -// https://tools.ietf.org/html/rfc7519#section-4.1 -// See examples for how to use this with your own claim types -type StandardClaims struct { - Audience string `json:"aud,omitempty"` - ExpiresAt int64 `json:"exp,omitempty"` - Id string `json:"jti,omitempty"` - IssuedAt int64 `json:"iat,omitempty"` - Issuer string `json:"iss,omitempty"` - NotBefore int64 `json:"nbf,omitempty"` - Subject string `json:"sub,omitempty"` -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. -func (c StandardClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - // The claims below are optional, by default, so if they are set to the - // default value in Go, let's not fail the verification for them. - if c.VerifyExpiresAt(now, false) == false { - delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) - vErr.Inner = fmt.Errorf("token is expired by %v", delta) - vErr.Errors |= ValidationErrorExpired - } - - if c.VerifyIssuedAt(now, false) == false { - vErr.Inner = fmt.Errorf("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if c.VerifyNotBefore(now, false) == false { - vErr.Inner = fmt.Errorf("token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { - return verifyAud(c.Audience, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { - return verifyExp(c.ExpiresAt, cmp, req) -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { - return verifyIat(c.IssuedAt, cmp, req) -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { - return verifyIss(c.Issuer, cmp, req) -} - -// Compares the nbf claim against cmp. 
-// If required is false, this method will return true if the value matches or is unset -func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { - return verifyNbf(c.NotBefore, cmp, req) -} - -// ----- helpers - -func verifyAud(aud string, cmp string, required bool) bool { - if aud == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyExp(exp int64, now int64, required bool) bool { - if exp == 0 { - return !required - } - return now <= exp -} - -func verifyIat(iat int64, now int64, required bool) bool { - if iat == 0 { - return !required - } - return now >= iat -} - -func verifyIss(iss string, cmp string, required bool) bool { - if iss == "" { - return !required - } - if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { - return true - } else { - return false - } -} - -func verifyNbf(nbf int64, now int64, required bool) bool { - if nbf == 0 { - return !required - } - return now >= nbf -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/doc.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/doc.go deleted file mode 100644 index a86dc1a..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html -// -// See README.md for more info. -package jwt diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/ecdsa.go deleted file mode 100644 index f977381..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ /dev/null @@ -1,148 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rand" - "errors" - "math/big" -) - -var ( - // Sadly this is missing from crypto/ecdsa compared to crypto/rsa - ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") -) - -// Implements the ECDSA family of signing methods signing methods -// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification -type SigningMethodECDSA struct { - Name string - Hash crypto.Hash - KeySize int - CurveBits int -} - -// Specific instances for EC256 and company -var ( - SigningMethodES256 *SigningMethodECDSA - SigningMethodES384 *SigningMethodECDSA - SigningMethodES512 *SigningMethodECDSA -) - -func init() { - // ES256 - SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} - RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { - return SigningMethodES256 - }) - - // ES384 - SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} - RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { - return SigningMethodES384 - }) - - // ES512 - SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} - RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { - return SigningMethodES512 - }) -} - -func (m *SigningMethodECDSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an ecdsa.PublicKey struct -func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - // Get the key - var ecdsaKey *ecdsa.PublicKey - switch k := key.(type) { - case *ecdsa.PublicKey: 
- ecdsaKey = k - default: - return ErrInvalidKeyType - } - - if len(sig) != 2*m.KeySize { - return ErrECDSAVerification - } - - r := big.NewInt(0).SetBytes(sig[:m.KeySize]) - s := big.NewInt(0).SetBytes(sig[m.KeySize:]) - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { - return nil - } else { - return ErrECDSAVerification - } -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an ecdsa.PrivateKey struct -func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { - // Get the key - var ecdsaKey *ecdsa.PrivateKey - switch k := key.(type) { - case *ecdsa.PrivateKey: - ecdsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return r, s - if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { - curveBits := ecdsaKey.Curve.Params().BitSize - - if m.CurveBits != curveBits { - return "", ErrInvalidKey - } - - keyBytes := curveBits / 8 - if curveBits%8 > 0 { - keyBytes += 1 - } - - // We serialize the outpus (r and s) into big-endian byte arrays and pad - // them with zeros on the left to make sure the sizes work out. Both arrays - // must be keyBytes long, and the output must be 2*keyBytes long. - rBytes := r.Bytes() - rBytesPadded := make([]byte, keyBytes) - copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) - - sBytes := s.Bytes() - sBytesPadded := make([]byte, keyBytes) - copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) - - out := append(rBytesPadded, sBytesPadded...) 
- - return EncodeSegment(out), nil - } else { - return "", err - } -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go deleted file mode 100644 index d19624b..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go +++ /dev/null @@ -1,67 +0,0 @@ -package jwt - -import ( - "crypto/ecdsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") - ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") -) - -// Parse PEM encoded Elliptic Curve Private Key Structure -func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { - return nil, err - } - - var pkey *ecdsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, ErrNotECPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey *ecdsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, ErrNotECPublicKey - } - - return pkey, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/errors.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/errors.go deleted file mode 100644 index 1c93024..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/errors.go +++ /dev/null @@ -1,59 +0,0 @@ -package jwt - -import ( - "errors" -) - -// Error constants -var ( - ErrInvalidKey = errors.New("key is invalid") - ErrInvalidKeyType = errors.New("key is of invalid type") - ErrHashUnavailable = errors.New("the requested hash function is unavailable") -) - -// The errors that might occur when parsing and validating a token -const ( - ValidationErrorMalformed uint32 = 1 << iota // Token is malformed - ValidationErrorUnverifiable // Token could not be verified because of signing problems - ValidationErrorSignatureInvalid // Signature validation failed - - // Standard Claim validation errors - ValidationErrorAudience // AUD validation failed - ValidationErrorExpired // EXP validation failed - ValidationErrorIssuedAt // IAT validation failed - ValidationErrorIssuer // ISS validation failed - ValidationErrorNotValidYet // NBF validation failed - ValidationErrorId // JTI validation failed - ValidationErrorClaimsInvalid // Generic claims validation error -) - -// Helper for constructing a ValidationError with a string error message -func NewValidationError(errorText string, errorFlags uint32) *ValidationError { - return &ValidationError{ - text: errorText, - Errors: errorFlags, - } -} - -// The error from Parse if token is not valid -type ValidationError struct { - Inner error // stores the error returned by external dependencies, i.e.: KeyFunc - Errors uint32 // 
bitfield. see ValidationError... constants - text string // errors that do not have a valid error just have text -} - -// Validation error is an error type -func (e ValidationError) Error() string { - if e.Inner != nil { - return e.Inner.Error() - } else if e.text != "" { - return e.text - } else { - return "token is invalid" - } -} - -// No errors -func (e *ValidationError) valid() bool { - return e.Errors == 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/hmac.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/hmac.go deleted file mode 100644 index addbe5d..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ /dev/null @@ -1,95 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/hmac" - "errors" -) - -// Implements the HMAC-SHA family of signing methods signing methods -// Expects key type of []byte for both signing and validation -type SigningMethodHMAC struct { - Name string - Hash crypto.Hash -} - -// Specific instances for HS256 and company -var ( - SigningMethodHS256 *SigningMethodHMAC - SigningMethodHS384 *SigningMethodHMAC - SigningMethodHS512 *SigningMethodHMAC - ErrSignatureInvalid = errors.New("signature is invalid") -) - -func init() { - // HS256 - SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { - return SigningMethodHS256 - }) - - // HS384 - SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { - return SigningMethodHS384 - }) - - // HS512 - SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { - return SigningMethodHS512 - }) -} - -func (m *SigningMethodHMAC) Alg() string { - return m.Name -} - -// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. -func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { - // Verify the key is the right type - keyBytes, ok := key.([]byte) - if !ok { - return ErrInvalidKeyType - } - - // Decode signature, for comparison - sig, err := DecodeSegment(signature) - if err != nil { - return err - } - - // Can we use the specified hashing method? - if !m.Hash.Available() { - return ErrHashUnavailable - } - - // This signing method is symmetric, so we validate the signature - // by reproducing the signature from the signing string and key, then - // comparing that against the provided signature. - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - if !hmac.Equal(sig, hasher.Sum(nil)) { - return ErrSignatureInvalid - } - - // No validation errors. Signature is good. - return nil -} - -// Implements the Sign method from SigningMethod for this signing method. 
-// Key must be []byte -func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { - if keyBytes, ok := key.([]byte); ok { - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := hmac.New(m.Hash.New, keyBytes) - hasher.Write([]byte(signingString)) - - return EncodeSegment(hasher.Sum(nil)), nil - } - - return "", ErrInvalidKeyType -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/map_claims.go deleted file mode 100644 index 291213c..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/map_claims.go +++ /dev/null @@ -1,94 +0,0 @@ -package jwt - -import ( - "encoding/json" - "errors" - // "fmt" -) - -// Claims type that uses the map[string]interface{} for JSON decoding -// This is the default claims type if you don't supply one -type MapClaims map[string]interface{} - -// Compares the aud claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyAudience(cmp string, req bool) bool { - aud, _ := m["aud"].(string) - return verifyAud(aud, cmp, req) -} - -// Compares the exp claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { - switch exp := m["exp"].(type) { - case float64: - return verifyExp(int64(exp), cmp, req) - case json.Number: - v, _ := exp.Int64() - return verifyExp(v, cmp, req) - } - return req == false -} - -// Compares the iat claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { - switch iat := m["iat"].(type) { - case float64: - return verifyIat(int64(iat), cmp, req) - case json.Number: - v, _ := iat.Int64() - return verifyIat(v, cmp, req) - } - return req == false -} - -// Compares the iss claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { - iss, _ := m["iss"].(string) - return verifyIss(iss, cmp, req) -} - -// Compares the nbf claim against cmp. -// If required is false, this method will return true if the value matches or is unset -func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { - switch nbf := m["nbf"].(type) { - case float64: - return verifyNbf(int64(nbf), cmp, req) - case json.Number: - v, _ := nbf.Int64() - return verifyNbf(v, cmp, req) - } - return req == false -} - -// Validates time based claims "exp, iat, nbf". -// There is no accounting for clock skew. -// As well, if any of the above claims are not in the token, it will still -// be considered a valid claim. 
-func (m MapClaims) Valid() error { - vErr := new(ValidationError) - now := TimeFunc().Unix() - - if m.VerifyExpiresAt(now, false) == false { - vErr.Inner = errors.New("Token is expired") - vErr.Errors |= ValidationErrorExpired - } - - if m.VerifyIssuedAt(now, false) == false { - vErr.Inner = errors.New("Token used before issued") - vErr.Errors |= ValidationErrorIssuedAt - } - - if m.VerifyNotBefore(now, false) == false { - vErr.Inner = errors.New("Token is not valid yet") - vErr.Errors |= ValidationErrorNotValidYet - } - - if vErr.valid() { - return nil - } - - return vErr -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/none.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/none.go deleted file mode 100644 index f04d189..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/none.go +++ /dev/null @@ -1,52 +0,0 @@ -package jwt - -// Implements the none signing method. This is required by the spec -// but you probably should never use it. -var SigningMethodNone *signingMethodNone - -const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" - -var NoneSignatureTypeDisallowedError error - -type signingMethodNone struct{} -type unsafeNoneMagicConstant string - -func init() { - SigningMethodNone = &signingMethodNone{} - NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) - - RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { - return SigningMethodNone - }) -} - -func (m *signingMethodNone) Alg() string { - return "none" -} - -// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { - // Key must be UnsafeAllowNoneSignatureType to prevent accidentally - // accepting 'none' signing method - if _, ok := key.(unsafeNoneMagicConstant); !ok { - return NoneSignatureTypeDisallowedError - } - // If signing method is none, signature must be an empty string - if signature != "" { - return NewValidationError( - "'none' signing method with non-empty signature", - ValidationErrorSignatureInvalid, - ) - } - - // Accept 'none' signing method. - return nil -} - -// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key -func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { - if _, ok := key.(unsafeNoneMagicConstant); ok { - return "", nil - } - return "", NoneSignatureTypeDisallowedError -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/parser.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/parser.go deleted file mode 100644 index d6901d9..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/parser.go +++ /dev/null @@ -1,148 +0,0 @@ -package jwt - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -type Parser struct { - ValidMethods []string // If populated, only these methods will be considered valid - UseJSONNumber bool // Use JSON Number format in JSON decoder - SkipClaimsValidation bool // Skip claims validation during token parsing -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. 
-// If everything is kosher, err will be nil -func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) -} - -func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - token, parts, err := p.ParseUnverified(tokenString, claims) - if err != nil { - return token, err - } - - // Verify signing method is in the required set - if p.ValidMethods != nil { - var signingMethodValid = false - var alg = token.Method.Alg() - for _, m := range p.ValidMethods { - if m == alg { - signingMethodValid = true - break - } - } - if !signingMethodValid { - // signing method is not in the listed set - return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) - } - } - - // Lookup key - var key interface{} - if keyFunc == nil { - // keyFunc was not provided. short circuiting validation - return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) - } - if key, err = keyFunc(token); err != nil { - // keyFunc returned an error - if ve, ok := err.(*ValidationError); ok { - return token, ve - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} - } - - vErr := &ValidationError{} - - // Validate Claims - if !p.SkipClaimsValidation { - if err := token.Claims.Valid(); err != nil { - - // If the Claims Valid returned an error, check if it is a validation error, - // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set - if e, ok := err.(*ValidationError); !ok { - vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} - } else { - vErr = e - } - } - } - - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } - - return token, vErr -} - -// WARNING: Don't use this method unless you know what you're doing -// -// This method parses the token but doesn't validate the signature. It's only -// ever useful in cases where you know the signature is valid (because it has -// been checked previously in the stack) and you want to extract values from -// it. 
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - token = &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error - if err != nil { - return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) - } - - return token, parts, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa.go deleted file mode 100644 index e4caf1c..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSA family of signing methods signing methods -// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation -type SigningMethodRSA struct { - Name string - Hash crypto.Hash -} - -// Specific instances for RS256 and company -var ( - SigningMethodRS256 *SigningMethodRSA - SigningMethodRS384 *SigningMethodRSA - SigningMethodRS512 *SigningMethodRSA -) - -func init() { - // RS256 - SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} - RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { - return SigningMethodRS256 - }) - - // RS384 - SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} - RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { - return SigningMethodRS384 - }) - - // RS512 - SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} - RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { - return SigningMethodRS512 - }) -} - -func (m *SigningMethodRSA) Alg() string { - return m.Name -} - -// Implements the Verify method from SigningMethod -// For this signing method, must be an *rsa.PublicKey structure. 
-func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - var ok bool - - if rsaKey, ok = key.(*rsa.PublicKey); !ok { - return ErrInvalidKeyType - } - - // Create hasher - if !m.Hash.Available() { - return ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Verify the signature - return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) -} - -// Implements the Sign method from SigningMethod -// For this signing method, must be an *rsa.PrivateKey structure. -func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - var ok bool - - // Validate type of key - if rsaKey, ok = key.(*rsa.PrivateKey); !ok { - return "", ErrInvalidKey - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go deleted file mode 100644 index 10ee9db..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go +++ /dev/null @@ -1,126 +0,0 @@ -// +build go1.4 - -package jwt - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" -) - -// Implements the RSAPSS family of signing methods signing methods -type SigningMethodRSAPSS struct { - *SigningMethodRSA - Options *rsa.PSSOptions -} - -// Specific instances for RS/PS and company -var ( - SigningMethodPS256 *SigningMethodRSAPSS - SigningMethodPS384 *SigningMethodRSAPSS - SigningMethodPS512 *SigningMethodRSAPSS -) - -func init() { - // PS256 - SigningMethodPS256 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS256", - Hash: crypto.SHA256, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA256, - }, - } - RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { - return SigningMethodPS256 - }) - - // PS384 - SigningMethodPS384 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS384", - Hash: crypto.SHA384, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA384, - }, - } - RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { - return SigningMethodPS384 - }) - - // PS512 - SigningMethodPS512 = &SigningMethodRSAPSS{ - &SigningMethodRSA{ - Name: "PS512", - Hash: crypto.SHA512, - }, - &rsa.PSSOptions{ - SaltLength: rsa.PSSSaltLengthAuto, - Hash: crypto.SHA512, - }, - } - RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { - return SigningMethodPS512 - }) -} - -// Implements the Verify method from SigningMethod -// For this verify method, key must be an rsa.PublicKey struct -func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { - var err error - - // Decode the signature - var sig []byte - if sig, err = DecodeSegment(signature); err != nil { - return err - } - - var rsaKey *rsa.PublicKey - switch k := key.(type) { - case *rsa.PublicKey: - rsaKey = k - default: - return ErrInvalidKey - } - - // Create hasher - if !m.Hash.Available() { - return 
ErrHashUnavailable - } - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) -} - -// Implements the Sign method from SigningMethod -// For this signing method, key must be an rsa.PrivateKey struct -func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { - var rsaKey *rsa.PrivateKey - - switch k := key.(type) { - case *rsa.PrivateKey: - rsaKey = k - default: - return "", ErrInvalidKeyType - } - - // Create the hasher - if !m.Hash.Available() { - return "", ErrHashUnavailable - } - - hasher := m.Hash.New() - hasher.Write([]byte(signingString)) - - // Sign the string and return the encoded bytes - if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { - return EncodeSegment(sigBytes), nil - } else { - return "", err - } -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go deleted file mode 100644 index a5ababf..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ /dev/null @@ -1,101 +0,0 @@ -package jwt - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" -) - -var ( - ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") - ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") - ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") -) - -// Parse PEM encoded PKCS1 or PKCS8 private key -func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 private key protected with password -func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - var parsedKey interface{} - - var blockDecrypted []byte - if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { - return nil, err - } - - if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { - return nil, err - } - } - - var pkey *rsa.PrivateKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, ErrNotRSAPrivateKey - } - - return pkey, nil -} - -// Parse PEM encoded PKCS1 or PKCS8 public key -func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { - var err error - - // Parse PEM block - var block *pem.Block - if block, _ = pem.Decode(key); block == nil { - return nil, ErrKeyMustBePEMEncoded - } - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { - if cert, err := x509.ParseCertificate(block.Bytes); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - var pkey 
*rsa.PublicKey - var ok bool - if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, ErrNotRSAPublicKey - } - - return pkey, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/signing_method.go deleted file mode 100644 index ed1f212..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/signing_method.go +++ /dev/null @@ -1,35 +0,0 @@ -package jwt - -import ( - "sync" -) - -var signingMethods = map[string]func() SigningMethod{} -var signingMethodLock = new(sync.RWMutex) - -// Implement SigningMethod to add new methods for signing or verifying tokens. -type SigningMethod interface { - Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid - Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error - Alg() string // returns the alg identifier for this method (example: 'HS256') -} - -// Register the "alg" name and a factory function for signing method. -// This is typically done during init() in the method's implementation -func RegisterSigningMethod(alg string, f func() SigningMethod) { - signingMethodLock.Lock() - defer signingMethodLock.Unlock() - - signingMethods[alg] = f -} - -// Get a signing method from an "alg" string -func GetSigningMethod(alg string) (method SigningMethod) { - signingMethodLock.RLock() - defer signingMethodLock.RUnlock() - - if methodF, ok := signingMethods[alg]; ok { - method = methodF() - } - return -} diff --git a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/token.go b/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/token.go deleted file mode 100644 index d637e08..0000000 --- a/cmd/bpa-operator/vendor/github.com/dgrijalva/jwt-go/token.go +++ /dev/null @@ -1,108 +0,0 @@ -package jwt - -import ( - "encoding/base64" - "encoding/json" - "strings" - "time" -) - -// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). -// You can override it to use another time value. This is useful for testing or if your -// server uses a different time zone than your tokens. -var TimeFunc = time.Now - -// Parse methods use this callback function to supply -// the key for verification. The function receives the parsed, -// but unverified Token. This allows you to use properties in the -// Header of the token (such as `kid`) to identify which key to use. -type Keyfunc func(*Token) (interface{}, error) - -// A JWT Token. Different fields will be used depending on whether you're -// creating or parsing/verifying a token. -type Token struct { - Raw string // The raw token. Populated when you Parse a token - Method SigningMethod // The signing method used or to be used - Header map[string]interface{} // The first segment of the token - Claims Claims // The second segment of the token - Signature string // The third segment of the token. Populated when you Parse a token - Valid bool // Is the token valid? Populated when you Parse/Verify a token -} - -// Create a new Token. 
Takes a signing method -func New(method SigningMethod) *Token { - return NewWithClaims(method, MapClaims{}) -} - -func NewWithClaims(method SigningMethod, claims Claims) *Token { - return &Token{ - Header: map[string]interface{}{ - "typ": "JWT", - "alg": method.Alg(), - }, - Claims: claims, - Method: method, - } -} - -// Get the complete, signed token -func (t *Token) SignedString(key interface{}) (string, error) { - var sig, sstr string - var err error - if sstr, err = t.SigningString(); err != nil { - return "", err - } - if sig, err = t.Method.Sign(sstr, key); err != nil { - return "", err - } - return strings.Join([]string{sstr, sig}, "."), nil -} - -// Generate the signing string. This is the -// most expensive part of the whole deal. Unless you -// need this for something special, just go straight for -// the SignedString. -func (t *Token) SigningString() (string, error) { - var err error - parts := make([]string, 2) - for i, _ := range parts { - var jsonValue []byte - if i == 0 { - if jsonValue, err = json.Marshal(t.Header); err != nil { - return "", err - } - } else { - if jsonValue, err = json.Marshal(t.Claims); err != nil { - return "", err - } - } - - parts[i] = EncodeSegment(jsonValue) - } - return strings.Join(parts, "."), nil -} - -// Parse, validate, and return a token. -// keyFunc will receive the parsed token and should return the key for validating. -// If everything is kosher, err will be nil -func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { - return new(Parser).Parse(tokenString, keyFunc) -} - -func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) -} - -// Encode JWT specific base64url encoding with padding stripped -func EncodeSegment(seg []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") -} - -// Decode JWT specific base64url encoding with padding stripped -func DecodeSegment(seg string) ([]byte, error) { - if l := len(seg) % 4; l > 0 { - seg += strings.Repeat("=", 4-l) - } - - return base64.URLEncoding.DecodeString(seg) -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/.gitignore b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/.gitignore deleted file mode 100644 index cece7be..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/.gitignore +++ /dev/null @@ -1,70 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -restful.html - -*.out - -tmp.prof - -go-restful.test - -examples/restful-basic-authentication - -examples/restful-encoding-filter - -examples/restful-filters - -examples/restful-hello-world - -examples/restful-resource-functions - -examples/restful-serve-static - -examples/restful-user-service - -*.DS_Store -examples/restful-user-resource - -examples/restful-multi-containers - -examples/restful-form-handling - -examples/restful-CORS-filter - -examples/restful-options-filter - -examples/restful-curly-router - -examples/restful-cpuprofiler-service - -examples/restful-pre-post-filters - -curly.prof - -examples/restful-NCSA-logging - -examples/restful-html-template - -s.html -restful-path-tail diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/.travis.yml 
b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/.travis.yml deleted file mode 100644 index b22f8f5..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.x - -script: go test -v \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/CHANGES.md b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/CHANGES.md deleted file mode 100644 index da5bcaa..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/CHANGES.md +++ /dev/null @@ -1,261 +0,0 @@ -## Change history of go-restful - - -v2.9.2 - -- Reduce allocations in per-request methods to improve performance (#395) - -v2.9.1 - -- Fix issue with default responses and invalid status code 0. (#393) - -v2.9.0 - -- add per Route content encoding setting (overrides container setting) - -v2.8.0 - -- add Request.QueryParameters() -- add json-iterator (via build tag) -- disable vgo module (until log is moved) - -v2.7.1 - -- add vgo module - -v2.6.1 - -- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) - -v2.6.0 - -- Make JSR 311 routing and path param processing consistent -- Adding description to RouteBuilder.Reads() -- Update example for Swagger12 and OpenAPI - -2017-09-13 - -- added route condition functions using `.If(func)` in route building. - -2017-02-16 - -- solved issue #304, make operation names unique - -2017-01-30 - - [IMPORTANT] For swagger users, change your import statement to: - swagger "github.com/emicklei/go-restful-swagger12" - -- moved swagger 1.2 code to go-restful-swagger12 -- created TAG 2.0.0 - -2017-01-27 - -- remove defer request body close -- expose Dispatch for testing filters and Routefunctions -- swagger response model cannot be array -- created TAG 1.0.0 - -2016-12-22 - -- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool) - -2016-11-26 - -- Default change! now use CurlyRouter (was RouterJSR311) -- Default change! no more caching of request content -- Default change! do not recover from panics - -2016-09-22 - -- fix the DefaultRequestContentType feature - -2016-02-14 - -- take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response -- add constructors for custom entity accessors for xml and json - -2015-09-27 - -- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency - -2015-09-25 - -- fixed problem with changing Header after WriteHeader (issue 235) - -2015-09-14 - -- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) -- added support for custom EntityReaderWriters. - -2015-08-06 - -- add support for reading entities from compressed request content -- use sync.Pool for compressors of http response and request body -- add Description to Parameter for documentation in Swagger UI - -2015-03-20 - -- add configurable logging - -2015-03-18 - -- if not specified, the Operation is derived from the Route function - -2015-03-17 - -- expose Parameter creation functions -- make trace logger an interface -- fix OPTIONSFilter -- customize rendering of ServiceError -- JSR311 router now handles wildcards -- add Notes to Route - -2014-11-27 - -- (api add) PrettyPrint per response. (as proposed in #167) - -2014-11-12 - -- (api add) ApiVersion(.) 
for documentation in Swagger UI - -2014-11-10 - -- (api change) struct fields tagged with "description" show up in Swagger UI - -2014-10-31 - -- (api change) ReturnsError -> Returns -- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder -- fix swagger nested structs -- sort Swagger response messages by code - -2014-10-23 - -- (api add) ReturnsError allows you to document Http codes in swagger -- fixed problem with greedy CurlyRouter -- (api add) Access-Control-Max-Age in CORS -- add tracing functionality (injectable) for debugging purposes -- support JSON parse 64bit int -- fix empty parameters for swagger -- WebServicesUrl is now optional for swagger -- fixed duplicate AccessControlAllowOrigin in CORS -- (api change) expose ServeMux in container -- (api add) added AllowedDomains in CORS -- (api add) ParameterNamed for detailed documentation - -2014-04-16 - -- (api add) expose constructor of Request for testing. - -2014-06-27 - -- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). -- (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). - -2014-07-03 - -- (api add) CORS can be configured with a list of allowed domains - -2014-03-12 - -- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) - -2014-02-26 - -- (api add) Request now provides information about the matched Route, see method SelectedRoutePath - -2014-02-17 - -- (api change) renamed parameter constants (go-lint checks) - -2014-01-10 - -- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier - -2014-01-07 - -- (api change) Write* methods in Response now return the error or nil. -- added example of serving HTML from a Go template. -- fixed comparing Allowed headers in CORS (is now case-insensitive) - -2013-11-13 - -- (api add) Response knows how many bytes are written to the response body. - -2013-10-29 - -- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. - -2013-10-04 - -- (api add) Response knows what HTTP status has been written -- (api add) Request can have attributes (map of string->interface, also called request-scoped variables - -2013-09-12 - -- (api change) Router interface simplified -- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths - -2013-08-05 - - add OPTIONS support - - add CORS support - -2013-08-27 - -- fixed some reported issues (see github) -- (api change) deprecated use of WriteError; use WriteErrorString instead - -2014-04-15 - -- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString - -2013-08-08 - -- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. -- (api add) the swagger package has be extended to have a UI per container. -- if panic is detected then a small stack trace is printed (thanks to runner-mei) -- (api add) WriteErrorString to Response - -Important API changes: - -- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead. 
-- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. - - -2013-07-06 - -- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. - -2013-06-19 - -- (improve) DoNotRecover option, moved request body closer, improved ReadEntity - -2013-06-03 - -- (api change) removed Dispatcher interface, hide PathExpression -- changed receiver names of type functions to be more idiomatic Go - -2013-06-02 - -- (optimize) Cache the RegExp compilation of Paths. - -2013-05-22 - -- (api add) Added support for request/response filter functions - -2013-05-18 - - -- (api add) Added feature to change the default Http Request Dispatch function (travis cline) -- (api change) Moved Swagger Webservice to swagger package (see example restful-user) - -[2012-11-14 .. 2013-05-18> - -- See https://github.com/emicklei/go-restful/commits - -2012-11-14 - -- Initial commit - - diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/LICENSE b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/LICENSE deleted file mode 100644 index ece7ec6..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2012,2013 Ernest Micklei - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/Makefile b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/Makefile deleted file mode 100644 index b40081c..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: test - -test: - go test -v . 
- -ex: - cd examples && ls *.go | xargs go build -o /tmp/ignore \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/README.md b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/README.md deleted file mode 100644 index f52c25a..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/README.md +++ /dev/null @@ -1,88 +0,0 @@ -go-restful -========== -package for building REST-style Web Services using Google Go - -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) -[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful) - -- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) - -REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: - -- GET = Retrieve a representation of a resource -- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm. -- PUT = Create if you are sending the full content of the specified resource (URI). -- PUT = Update if you are updating the full content of the specified resource. -- DELETE = Delete if you are requesting the server to delete the resource -- PATCH = Update partial content of a resource -- OPTIONS = Get information about the communication options for the request URI - -### Example - -```Go -ws := new(restful.WebService) -ws. - Path("/users"). - Consumes(restful.MIME_XML, restful.MIME_JSON). - Produces(restful.MIME_JSON, restful.MIME_XML) - -ws.Route(ws.GET("/{user-id}").To(u.findUser). - Doc("get a user"). - Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")). - Writes(User{})) -... - -func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... -} -``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) - -### Features - -- Routes for request → function mapping with path parameter (e.g. {id}) support -- Configurable router: - - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. 
/meetings/{id} or /static/{subpath:*} - - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions -- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) -- Response API for writing structs to JSON/XML and setting headers -- Customizable encoding using EntityReaderWriter registration -- Filters for intercepting the request → response flow on Service or Route level -- Request-scoped variables using attributes -- Containers for WebServices on different HTTP endpoints -- Content encoding (gzip,deflate) of request and response payloads -- Automatic responses on OPTIONS (using a filter) -- Automatic CORS request handling (using a filter) -- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12)) -- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...) -- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) -- Configurable (trace) logging -- Customizable gzip/deflate readers and writers using CompressorProvider registration - -## How to customize -There are several hooks to customize the behavior of the go-restful package. - -- Router algorithm -- Panic recovery -- JSON decoder -- Trace logging -- Compression -- Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` - -TODO: write examples of these. - -## Resources - -- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) -- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) -- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) -- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia) -- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora) - -Type ```git shortlog -s``` for a full list of contributors. - -© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/Srcfile b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/Srcfile deleted file mode 100644 index 16fd186..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/Srcfile +++ /dev/null @@ -1 +0,0 @@ -{"SkipDirs": ["examples"]} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/bench_test.sh b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/bench_test.sh deleted file mode 100644 index 47ffbe4..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/bench_test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#go test -run=none -file bench_test.go -test.bench . 
-cpuprofile=bench_test.out - -go test -c -./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany -./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly - -#go tool pprof go-restful.test tmp.prof -go tool pprof go-restful.test curly.prof - - diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compress.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compress.go deleted file mode 100644 index 220b377..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compress.go +++ /dev/null @@ -1,123 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bufio" - "compress/gzip" - "compress/zlib" - "errors" - "io" - "net" - "net/http" - "strings" -) - -// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting. -var EnableContentEncoding = false - -// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib) -type CompressingResponseWriter struct { - writer http.ResponseWriter - compressor io.WriteCloser - encoding string -} - -// Header is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) Header() http.Header { - return c.writer.Header() -} - -// WriteHeader is part of http.ResponseWriter interface -func (c *CompressingResponseWriter) WriteHeader(status int) { - c.writer.WriteHeader(status) -} - -// Write is part of http.ResponseWriter interface -// It is passed through the compressor -func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) { - if c.isCompressorClosed() { - return -1, errors.New("Compressing error: tried to write data using closed compressor") - } - return c.compressor.Write(bytes) -} - -// CloseNotify is part of http.CloseNotifier interface -func (c *CompressingResponseWriter) CloseNotify() <-chan bool { - return c.writer.(http.CloseNotifier).CloseNotify() -} - -// Close the underlying compressor -func (c *CompressingResponseWriter) Close() error { - if c.isCompressorClosed() { - return errors.New("Compressing error: tried to close already closed compressor") - } - - c.compressor.Close() - if ENCODING_GZIP == c.encoding { - currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer)) - } - if ENCODING_DEFLATE == c.encoding { - currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer)) - } - // gc hint needed? - c.compressor = nil - return nil -} - -func (c *CompressingResponseWriter) isCompressorClosed() bool { - return nil == c.compressor -} - -// Hijack implements the Hijacker interface -// This is especially useful when combining Container.EnabledContentEncoding -// in combination with websockets (for instance gorilla/websocket) -func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := c.writer.(http.Hijacker) - if !ok { - return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface") - } - return hijacker.Hijack() -} - -// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. 
-func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { - header := httpRequest.Header.Get(HEADER_AcceptEncoding) - gi := strings.Index(header, ENCODING_GZIP) - zi := strings.Index(header, ENCODING_DEFLATE) - // use in order of appearance - if gi == -1 { - return zi != -1, ENCODING_DEFLATE - } else if zi == -1 { - return gi != -1, ENCODING_GZIP - } else { - if gi < zi { - return true, ENCODING_GZIP - } - return true, ENCODING_DEFLATE - } -} - -// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate} -func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) { - httpWriter.Header().Set(HEADER_ContentEncoding, encoding) - c := new(CompressingResponseWriter) - c.writer = httpWriter - var err error - if ENCODING_GZIP == encoding { - w := currentCompressorProvider.AcquireGzipWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_GZIP - } else if ENCODING_DEFLATE == encoding { - w := currentCompressorProvider.AcquireZlibWriter() - w.Reset(httpWriter) - c.compressor = w - c.encoding = ENCODING_DEFLATE - } else { - return nil, errors.New("Unknown encoding:" + encoding) - } - return c, err -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressor_cache.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressor_cache.go deleted file mode 100644 index ee42601..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressor_cache.go +++ /dev/null @@ -1,103 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount -// of writers and readers (resources). -// If a new resource is acquired and all are in use, it will return a new unmanaged resource. -type BoundedCachedCompressors struct { - gzipWriters chan *gzip.Writer - gzipReaders chan *gzip.Reader - zlibWriters chan *zlib.Writer - writersCapacity int - readersCapacity int -} - -// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors. -func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors { - b := &BoundedCachedCompressors{ - gzipWriters: make(chan *gzip.Writer, writersCapacity), - gzipReaders: make(chan *gzip.Reader, readersCapacity), - zlibWriters: make(chan *zlib.Writer, writersCapacity), - writersCapacity: writersCapacity, - readersCapacity: readersCapacity, - } - for ix := 0; ix < writersCapacity; ix++ { - b.gzipWriters <- newGzipWriter() - b.zlibWriters <- newZlibWriter() - } - for ix := 0; ix < readersCapacity; ix++ { - b.gzipReaders <- newGzipReader() - } - return b -} - -// AcquireGzipWriter returns an resettable *gzip.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer { - var writer *gzip.Writer - select { - case writer, _ = <-b.gzipWriters: - default: - // return a new unmanaged one - writer = newGzipWriter() - } - return writer -} - -// ReleaseGzipWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. 
-func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) { - // forget the unmanaged ones - if len(b.gzipWriters) < b.writersCapacity { - b.gzipWriters <- w - } -} - -// AcquireGzipReader returns a *gzip.Reader. Needs to be released. -func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader { - var reader *gzip.Reader - select { - case reader, _ = <-b.gzipReaders: - default: - // return a new unmanaged one - reader = newGzipReader() - } - return reader -} - -// ReleaseGzipReader accepts a reader (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) { - // forget the unmanaged ones - if len(b.gzipReaders) < b.readersCapacity { - b.gzipReaders <- r - } -} - -// AcquireZlibWriter returns an resettable *zlib.Writer. Needs to be released. -func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer { - var writer *zlib.Writer - select { - case writer, _ = <-b.zlibWriters: - default: - // return a new unmanaged one - writer = newZlibWriter() - } - return writer -} - -// ReleaseZlibWriter accepts a writer (does not have to be one that was cached) -// only when the cache has room for it. It will ignore it otherwise. -func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) { - // forget the unmanaged ones - if len(b.zlibWriters) < b.writersCapacity { - b.zlibWriters <- w - } -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressor_pools.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressor_pools.go deleted file mode 100644 index d866ce6..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressor_pools.go +++ /dev/null @@ -1,91 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "sync" -) - -// SyncPoolCompessors is a CompressorProvider that use the standard sync.Pool. -type SyncPoolCompessors struct { - GzipWriterPool *sync.Pool - GzipReaderPool *sync.Pool - ZlibWriterPool *sync.Pool -} - -// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors. 
-func NewSyncPoolCompessors() *SyncPoolCompessors { - return &SyncPoolCompessors{ - GzipWriterPool: &sync.Pool{ - New: func() interface{} { return newGzipWriter() }, - }, - GzipReaderPool: &sync.Pool{ - New: func() interface{} { return newGzipReader() }, - }, - ZlibWriterPool: &sync.Pool{ - New: func() interface{} { return newZlibWriter() }, - }, - } -} - -func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer { - return s.GzipWriterPool.Get().(*gzip.Writer) -} - -func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) { - s.GzipWriterPool.Put(w) -} - -func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader { - return s.GzipReaderPool.Get().(*gzip.Reader) -} - -func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) { - s.GzipReaderPool.Put(r) -} - -func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer { - return s.ZlibWriterPool.Get().(*zlib.Writer) -} - -func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) { - s.ZlibWriterPool.Put(w) -} - -func newGzipWriter() *gzip.Writer { - // create with an empty bytes writer; it will be replaced before using the gzipWriter - writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} - -func newGzipReader() *gzip.Reader { - // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader - // we can safely use currentCompressProvider because it is set on package initialization. - w := currentCompressorProvider.AcquireGzipWriter() - defer currentCompressorProvider.ReleaseGzipWriter(w) - b := new(bytes.Buffer) - w.Reset(b) - w.Flush() - w.Close() - reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) - if err != nil { - panic(err.Error()) - } - return reader -} - -func newZlibWriter() *zlib.Writer { - writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) - if err != nil { - panic(err.Error()) - } - return writer -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressors.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressors.go deleted file mode 100644 index 9db4a8c..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/compressors.go +++ /dev/null @@ -1,54 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/gzip" - "compress/zlib" -) - -// CompressorProvider describes a component that can provider compressors for the std methods. -type CompressorProvider interface { - // Returns a *gzip.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireGzipWriter() *gzip.Writer - - // Releases an acquired *gzip.Writer. - ReleaseGzipWriter(w *gzip.Writer) - - // Returns a *gzip.Reader which needs to be released later. - AcquireGzipReader() *gzip.Reader - - // Releases an acquired *gzip.Reader. - ReleaseGzipReader(w *gzip.Reader) - - // Returns a *zlib.Writer which needs to be released later. - // Before using it, call Reset(). - AcquireZlibWriter() *zlib.Writer - - // Releases an acquired *zlib.Writer. - ReleaseZlibWriter(w *zlib.Writer) -} - -// DefaultCompressorProvider is the actual provider of compressors (zlib or gzip). -var currentCompressorProvider CompressorProvider - -func init() { - currentCompressorProvider = NewSyncPoolCompessors() -} - -// CurrentCompressorProvider returns the current CompressorProvider. 
-// It is initialized using a SyncPoolCompessors. -func CurrentCompressorProvider() CompressorProvider { - return currentCompressorProvider -} - -// SetCompressorProvider sets the actual provider of compressors (zlib or gzip). -func SetCompressorProvider(p CompressorProvider) { - if p == nil { - panic("cannot set compressor provider to nil") - } - currentCompressorProvider = p -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/constants.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/constants.go deleted file mode 100644 index 203439c..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/constants.go +++ /dev/null @@ -1,30 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -const ( - MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() - MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default - - HEADER_Allow = "Allow" - HEADER_Accept = "Accept" - HEADER_Origin = "Origin" - HEADER_ContentType = "Content-Type" - HEADER_LastModified = "Last-Modified" - HEADER_AcceptEncoding = "Accept-Encoding" - HEADER_ContentEncoding = "Content-Encoding" - HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers" - HEADER_AccessControlRequestMethod = "Access-Control-Request-Method" - HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers" - HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods" - HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin" - HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials" - HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers" - HEADER_AccessControlMaxAge = "Access-Control-Max-Age" - - ENCODING_GZIP = "gzip" - ENCODING_DEFLATE = "deflate" -) diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/container.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/container.go deleted file mode 100644 index 061a8d7..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/container.go +++ /dev/null @@ -1,377 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "errors" - "fmt" - "net/http" - "os" - "runtime" - "strings" - "sync" - - "github.com/emicklei/go-restful/log" -) - -// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. 
-// The requests are further dispatched to routes of WebServices using a RouteSelector -type Container struct { - webServicesLock sync.RWMutex - webServices []*WebService - ServeMux *http.ServeMux - isRegisteredOnRoot bool - containerFilters []FilterFunction - doNotRecover bool // default is true - recoverHandleFunc RecoverHandleFunction - serviceErrorHandleFunc ServiceErrorHandleFunction - router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative) - contentEncodingEnabled bool // default is false -} - -// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter) -func NewContainer() *Container { - return &Container{ - webServices: []*WebService{}, - ServeMux: http.NewServeMux(), - isRegisteredOnRoot: false, - containerFilters: []FilterFunction{}, - doNotRecover: true, - recoverHandleFunc: logStackOnRecover, - serviceErrorHandleFunc: writeServiceError, - router: CurlyRouter{}, - contentEncodingEnabled: false} -} - -// RecoverHandleFunction declares functions that can be used to handle a panic situation. -// The first argument is what recover() returns. The second must be used to communicate an error response. -type RecoverHandleFunction func(interface{}, http.ResponseWriter) - -// RecoverHandler changes the default function (logStackOnRecover) to be called -// when a panic is detected. DoNotRecover must be have its default value (=false). -func (c *Container) RecoverHandler(handler RecoverHandleFunction) { - c.recoverHandleFunc = handler -} - -// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation. -// The first argument is the service error, the second is the request that resulted in the error and -// the third must be used to communicate an error response. -type ServiceErrorHandleFunction func(ServiceError, *Request, *Response) - -// ServiceErrorHandler changes the default function (writeServiceError) to be called -// when a ServiceError is detected. -func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) { - c.serviceErrorHandleFunc = handler -} - -// DoNotRecover controls whether panics will be caught to return HTTP 500. -// If set to true, Route functions are responsible for handling any error situation. -// Default value is true. -func (c *Container) DoNotRecover(doNot bool) { - c.doNotRecover = doNot -} - -// Router changes the default Router (currently CurlyRouter) -func (c *Container) Router(aRouter RouteSelector) { - c.router = aRouter -} - -// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. -func (c *Container) EnableContentEncoding(enabled bool) { - c.contentEncodingEnabled = enabled -} - -// Add a WebService to the Container. It will detect duplicate root paths and exit in that case. 
-func (c *Container) Add(service *WebService) *Container { - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - - // if rootPath was not set then lazy initialize it - if len(service.rootPath) == 0 { - service.Path("/") - } - - // cannot have duplicate root paths - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - log.Printf("WebService with duplicate root path detected:['%v']", each) - os.Exit(1) - } - } - - // If not registered on root then add specific mapping - if !c.isRegisteredOnRoot { - c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux) - } - c.webServices = append(c.webServices, service) - return c -} - -// addHandler may set a new HandleFunc for the serveMux -// this function must run inside the critical region protected by the webServicesLock. -// returns true if the function was registered on root ("/") -func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool { - pattern := fixedPrefixPath(service.RootPath()) - // check if root path registration is needed - if "/" == pattern || "" == pattern { - serveMux.HandleFunc("/", c.dispatch) - return true - } - // detect if registration already exists - alreadyMapped := false - for _, each := range c.webServices { - if each.RootPath() == service.RootPath() { - alreadyMapped = true - break - } - } - if !alreadyMapped { - serveMux.HandleFunc(pattern, c.dispatch) - if !strings.HasSuffix(pattern, "/") { - serveMux.HandleFunc(pattern+"/", c.dispatch) - } - } - return false -} - -func (c *Container) Remove(ws *WebService) error { - if c.ServeMux == http.DefaultServeMux { - errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws) - log.Print(errMsg) - return errors.New(errMsg) - } - c.webServicesLock.Lock() - defer c.webServicesLock.Unlock() - // build a new ServeMux and re-register all WebServices - newServeMux := http.NewServeMux() - newServices := []*WebService{} - newIsRegisteredOnRoot := false - for _, each := range c.webServices { - if each.rootPath != ws.rootPath { - // If not registered on root then add specific mapping - if !newIsRegisteredOnRoot { - newIsRegisteredOnRoot = c.addHandler(each, newServeMux) - } - newServices = append(newServices, each) - } - } - c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot - return nil -} - -// logStackOnRecover is the default RecoverHandleFunction and is called -// when DoNotRecover is false and the recoverHandleFunc is not set for the container. -// Default implementation logs the stacktrace and writes the stacktrace on the response. -// This may be a security issue as it exposes sourcecode information. -func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) { - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason)) - for i := 2; ; i += 1 { - _, file, line, ok := runtime.Caller(i) - if !ok { - break - } - buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line)) - } - log.Print(buffer.String()) - httpWriter.WriteHeader(http.StatusInternalServerError) - httpWriter.Write(buffer.Bytes()) -} - -// writeServiceError is the default ServiceErrorHandleFunction and is called -// when a ServiceError is returned during route selection. 
Default implementation -// calls resp.WriteErrorString(err.Code, err.Message) -func writeServiceError(err ServiceError, req *Request, resp *Response) { - resp.WriteErrorString(err.Code, err.Message) -} - -// Dispatch the incoming Http Request to a matching WebService. -func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { - if httpWriter == nil { - panic("httpWriter cannot be nil") - } - if httpRequest == nil { - panic("httpRequest cannot be nil") - } - c.dispatch(httpWriter, httpRequest) -} - -// Dispatch the incoming Http Request to a matching WebService. -func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { - writer := httpWriter - - // CompressingResponseWriter should be closed after all operations are done - defer func() { - if compressWriter, ok := writer.(*CompressingResponseWriter); ok { - compressWriter.Close() - } - }() - - // Instal panic recovery unless told otherwise - if !c.doNotRecover { // catch all for 500 response - defer func() { - if r := recover(); r != nil { - c.recoverHandleFunc(r, writer) - return - } - }() - } - - // Find best match Route ; err is non nil if no match was found - var webService *WebService - var route *Route - var err error - func() { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - webService, route, err = c.router.SelectRoute( - c.webServices, - httpRequest) - }() - - // Detect if compression is needed - // assume without compression, test for override - contentEncodingEnabled := c.contentEncodingEnabled - if route != nil && route.contentEncodingEnabled != nil { - contentEncodingEnabled = *route.contentEncodingEnabled - } - if contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } - - if err != nil { - // a non-200 response has already been written - // run container filters anyway ; they should not touch the response... - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - switch err.(type) { - case ServiceError: - ser := err.(ServiceError) - c.serviceErrorHandleFunc(ser, req, resp) - } - // TODO - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) - return - } - pathProcessor, routerProcessesPath := c.router.(PathProcessor) - if !routerProcessesPath { - pathProcessor = defaultPathProcessor{} - } - pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) - wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) - // pass through filters (if any) - if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { - // compose filter chain - allFilters := []FilterFunction{} - allFilters = append(allFilters, c.containerFilters...) - allFilters = append(allFilters, webService.filters...) - allFilters = append(allFilters, route.Filters...) 
- chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) { - // handle request by route after passing all filters - route.Function(wrappedRequest, wrappedResponse) - }} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // no filters, handle request by route - route.Function(wrappedRequest, wrappedResponse) - } -} - -// fixedPrefixPath returns the fixed part of the partspec ; it may include template vars {} -func fixedPrefixPath(pathspec string) string { - varBegin := strings.Index(pathspec, "{") - if -1 == varBegin { - return pathspec - } - return pathspec[:varBegin] -} - -// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server -func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { - c.ServeMux.ServeHTTP(httpwriter, httpRequest) -} - -// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. -func (c *Container) Handle(pattern string, handler http.Handler) { - c.ServeMux.Handle(pattern, handler) -} - -// HandleWithFilter registers the handler for the given pattern. -// Container's filter chain is applied for handler. -// If a handler already exists for pattern, HandleWithFilter panics. -func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { - f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) { - if len(c.containerFilters) == 0 { - handler.ServeHTTP(httpResponse, httpRequest) - return - } - - chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - handler.ServeHTTP(httpResponse, httpRequest) - }} - chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) - } - - c.Handle(pattern, http.HandlerFunc(f)) -} - -// Filter appends a container FilterFunction. These are called before dispatching -// a http.Request to a WebService from the container -func (c *Container) Filter(filter FilterFunction) { - c.containerFilters = append(c.containerFilters, filter) -} - -// RegisteredWebServices returns the collections of added WebServices -func (c *Container) RegisteredWebServices() []*WebService { - c.webServicesLock.RLock() - defer c.webServicesLock.RUnlock() - result := make([]*WebService, len(c.webServices)) - for ix := range c.webServices { - result[ix] = c.webServices[ix] - } - return result -} - -// computeAllowedMethods returns a list of HTTP methods that are valid for a Request -func (c *Container) computeAllowedMethods(req *Request) []string { - // Go through all RegisteredWebServices() and all its Routes to collect the options - methods := []string{} - requestPath := req.Request.URL.Path - for _, ws := range c.RegisteredWebServices() { - matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - finalMatch := matches[len(matches)-1] - for _, rt := range ws.Routes() { - matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch) - if matches != nil { - lastMatch := matches[len(matches)-1] - if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. - methods = append(methods, rt.Method) - } - } - } - } - } - // methods = append(methods, "OPTIONS") not sure about this - return methods -} - -// newBasicRequestResponse creates a pair of Request,Response from its http versions. -// It is basic because no parameter or (produces) content-type information is given. 
-func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) { - resp := NewResponse(httpWriter) - resp.requestAccept = httpRequest.Header.Get(HEADER_Accept) - return NewRequest(httpRequest), resp -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/cors_filter.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/cors_filter.go deleted file mode 100644 index 1efeef0..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/cors_filter.go +++ /dev/null @@ -1,202 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "regexp" - "strconv" - "strings" -) - -// CrossOriginResourceSharing is used to create a Container Filter that implements CORS. -// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page -// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from. -// -// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing -// http://enable-cors.org/server.html -// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request -type CrossOriginResourceSharing struct { - ExposeHeaders []string // list of Header names - AllowedHeaders []string // list of Header names - AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. - AllowedMethods []string - MaxAge int // number of seconds before requiring new Options request - CookiesAllowed bool - Container *Container - - allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check. 
-} - -// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html -// and http://www.html5rocks.com/static/images/cors_server_flowchart.png -func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) { - origin := req.Request.Header.Get(HEADER_Origin) - if len(origin) == 0 { - if trace { - traceLogger.Print("no Http header Origin set") - } - chain.ProcessFilter(req, resp) - return - } - if !c.isOriginAllowed(origin) { // check whether this origin is allowed - if trace { - traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns) - } - chain.ProcessFilter(req, resp) - return - } - if req.Request.Method != "OPTIONS" { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } - if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" { - c.doPreflightRequest(req, resp) - } else { - c.doActualRequest(req, resp) - chain.ProcessFilter(req, resp) - return - } -} - -func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) { - c.setOptionsHeaders(req, resp) - // continue processing the response -} - -func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) { - if len(c.AllowedMethods) == 0 { - if c.Container == nil { - c.AllowedMethods = DefaultContainer.computeAllowedMethods(req) - } else { - c.AllowedMethods = c.Container.computeAllowedMethods(req) - } - } - - acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod) - if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestMethod, - acrm, - c.AllowedMethods) - } - return - } - acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) - if len(acrhs) > 0 { - for _, each := range strings.Split(acrhs, ",") { - if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) { - if trace { - traceLogger.Printf("Http header %s:%s is not in %v", - HEADER_AccessControlRequestHeaders, - acrhs, - c.AllowedHeaders) - } - return - } - } - } - resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ",")) - resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs) - c.setOptionsHeaders(req, resp) - - // return http 200 response, no body -} - -func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) { - c.checkAndSetExposeHeaders(resp) - c.setAllowOriginHeader(req, resp) - c.checkAndSetAllowCredentials(resp) - if c.MaxAge > 0 { - resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge)) - } -} - -func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { - if len(origin) == 0 { - return false - } - if len(c.AllowedDomains) == 0 { - return true - } - - allowed := false - for _, domain := range c.AllowedDomains { - if domain == origin { - allowed = true - break - } - } - - if !allowed { - if len(c.allowedOriginPatterns) == 0 { - // compile allowed domains to allowed origin patterns - allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) - if err != nil { - return false - } - c.allowedOriginPatterns = allowedOriginRegexps - } - - for _, pattern := range c.allowedOriginPatterns { - if allowed = pattern.MatchString(origin); allowed { - break - } - } - } - - return allowed -} - -func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { - origin := 
req.Request.Header.Get(HEADER_Origin) - if c.isOriginAllowed(origin) { - resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) - } -} - -func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) { - if len(c.ExposeHeaders) > 0 { - resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ",")) - } -} - -func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) { - if c.CookiesAllowed { - resp.AddHeader(HEADER_AccessControlAllowCredentials, "true") - } -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool { - for _, each := range allowedMethods { - if each == method { - return true - } - } - return false -} - -func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool { - for _, each := range c.AllowedHeaders { - if strings.ToLower(each) == strings.ToLower(header) { - return true - } - } - return false -} - -// Take a list of strings and compile them into a list of regular expressions. -func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return regexps, err - } - regexps = append(regexps, r) - } - return regexps, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/coverage.sh b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/coverage.sh deleted file mode 100644 index e27dbf1..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/coverage.sh +++ /dev/null @@ -1,2 +0,0 @@ -go test -coverprofile=coverage.out -go tool cover -html=coverage.out \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/curly.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/curly.go deleted file mode 100644 index 14d5b76..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/curly.go +++ /dev/null @@ -1,164 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" - "regexp" - "sort" - "strings" -) - -// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets. -type CurlyRouter struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. 
-func (c CurlyRouter) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) { - - requestTokens := tokenizePath(httpRequest.URL.Path) - - detectedService := c.detectWebService(requestTokens, webServices) - if detectedService == nil { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path) - } - return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - candidateRoutes := c.selectRoutes(detectedService, requestTokens) - if len(candidateRoutes) == 0 { - if trace { - traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path) - } - return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest) - if selectedRoute == nil { - return detectedService, nil, err - } - return detectedService, selectedRoute, nil -} - -// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. -func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { - candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) - if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? - } - } - sort.Sort(candidates) - return candidates -} - -// matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. -func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) { - if len(routeTokens) < len(requestTokens) { - // proceed in matching only if last routeToken is wildcard - count := len(routeTokens) - if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") { - return false, 0, 0 - } - // proceed - } - for i, routeToken := range routeTokens { - if i == len(requestTokens) { - // reached end of request path - return false, 0, 0 - } - requestToken := requestTokens[i] - if strings.HasPrefix(routeToken, "{") { - paramCount++ - if colon := strings.Index(routeToken, ":"); colon != -1 { - // match by regex - matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken) - if !matchesToken { - return false, 0, 0 - } - if matchesRemainder { - break - } - } - } else { // no { prefix - if requestToken != routeToken { - return false, 0, 0 - } - staticCount++ - } - } - return true, paramCount, staticCount -} - -// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens -// format routeToken is {someVar:someExpression}, e.g. 
{zipcode:[\d][\d][\d][\d][A-Z][A-Z]} -func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) { - regPart := routeToken[colon+1 : len(routeToken)-1] - if regPart == "*" { - if trace { - traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken) - } - return true, true - } - matched, err := regexp.MatchString(regPart, requestToken) - return (matched && err == nil), false -} - -var jsr311Router = RouterJSR311{} - -// detectRoute selectes from a list of Route the first match by inspecting both the Accept and Content-Type -// headers of the Request. See also RouterJSR311 in jsr311.go -func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) { - // tracing is done inside detectRoute - return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest) -} - -// detectWebService returns the best matching webService given the list of path tokens. -// see also computeWebserviceScore -func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService - score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) - if matches && (eachScore > score) { - best = each - score = eachScore - } - } - return best -} - -// computeWebserviceScore returns whether tokens match and -// the weighted score of the longest matching consecutive tokens from the beginning. -func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { - return false, 0 - } - score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { - score++ - continue - } - if len(other) > 0 && strings.HasPrefix(other, "{") { - // no empty match - if len(each) == 0 { - return false, score - } - score += 1 - } else { - // not a parameter - if each != other { - return false, score - } - score += (len(tokens) - i) * 10 //fuzzy - } - } - return true, score -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/curly_route.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/curly_route.go deleted file mode 100644 index 403dd3b..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/curly_route.go +++ /dev/null @@ -1,54 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// curlyRoute exits for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements. -type curlyRoute struct { - route Route - paramCount int - staticCount int -} - -// sortableCurlyRoutes orders by most parameters and path elements first. 
-type sortableCurlyRoutes []curlyRoute - -func (s *sortableCurlyRoutes) add(route curlyRoute) { - *s = append(*s, route) -} - -func (s sortableCurlyRoutes) routes() (routes []Route) { - routes = make([]Route, 0, len(s)) - for _, each := range s { - routes = append(routes, each.route) // TODO change return type - } - return routes -} - -func (s sortableCurlyRoutes) Len() int { - return len(s) -} -func (s sortableCurlyRoutes) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s sortableCurlyRoutes) Less(i, j int) bool { - a := s[j] - b := s[i] - - // primary key - if a.staticCount < b.staticCount { - return true - } - if a.staticCount > b.staticCount { - return false - } - // secundary key - if a.paramCount < b.paramCount { - return true - } - if a.paramCount > b.paramCount { - return false - } - return a.route.Path < b.route.Path -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/doc.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/doc.go deleted file mode 100644 index f7c16b0..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/doc.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Package restful , a lean package for creating REST-style WebServices without magic. - -WebServices and Routes - -A WebService has a collection of Route objects that dispatch incoming Http Requests to a function calls. -Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes. -WebServices must be added to a container (see below) in order to handler Http requests from a server. - -A Route is defined by a HTTP method, an URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept). -This package has the logic to find the best matching Route and if found, call its Function. - - ws := new(restful.WebService) - ws. - Path("/users"). - Consumes(restful.MIME_JSON, restful.MIME_XML). - Produces(restful.MIME_JSON, restful.MIME_XML) - - ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource - - ... - - // GET http://localhost:8080/users/1 - func (u UserResource) findUser(request *restful.Request, response *restful.Response) { - id := request.PathParameter("user-id") - ... - } - -The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. - -Regular expression matching Routes - -A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path. -For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters. -Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax) -This feature requires the use of a CurlyRouter. - -Containers - -A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests. -Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container. -The Default container of go-restful uses the http.DefaultServeMux. -You can create your own Container and create a new http.Server for that particular container. 
- - container := restful.NewContainer() - server := &http.Server{Addr: ":8081", Handler: container} - -Filters - -A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses. -You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc. -In the restful package there are three hooks into the request,response flow where filters can be added. -Each filter must define a FilterFunction: - - func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain) - -Use the following statement to pass the request,response pair to the next filter or RouteFunction - - chain.ProcessFilter(req, resp) - -Container Filters - -These are processed before any registered WebService. - - // install a (global) filter for the default container (processed before any webservice) - restful.Filter(globalLogging) - -WebService Filters - -These are processed before any Route of a WebService. - - // install a webservice filter (processed before any route) - ws.Filter(webserviceLogging).Filter(measureTime) - - -Route Filters - -These are processed before calling the function associated with the Route. - - // install 2 chained route filters (processed before calling findUser) - ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. - -Response Encoding - -Two encodings are supported: gzip and deflate. To enable this for all responses: - - restful.DefaultContainer.EnableContentEncoding(true) - -If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. -Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. - -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go - -OPTIONS support - -By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request. - - Filter(OPTIONSFilter()) - -CORS - -By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests. - - cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer} - Filter(cors.Filter) - -Error Handling - -Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why. -For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation. - - 400: Bad Request - -If path or query parameters are not valid (content or type) then use http.StatusBadRequest. - - 404: Not Found - -Despite a valid URI, the resource requested may not be available - - 500: Internal Server Error - -If the application logic could not process the request (or write the response) then use http.StatusInternalServerError. - - 405: Method Not Allowed - -The request has a valid URL but the method (GET,PUT,POST,...) is not allowed. - - 406: Not Acceptable - -The request does not have or has an unknown Accept Header set for this operation. - - 415: Unsupported Media Type - -The request does not have or has an unknown Content-Type Header set for this operation. 
- -ServiceError - -In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response. - -Performance options - -This package has several options that affect the performance of your service. It is important to understand them and how you can change it. - - restful.DefaultContainer.DoNotRecover(false) - -DoNotRecover controls whether panics will be caught to return HTTP 500. -If set to false, the container will recover from panics. -Default value is true - - restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20)) - -If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool. -Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation. - -Trouble shooting - -This package has the means to produce detail logging of the complete Http request matching process and filter invocation. -Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger) instance such as: - - restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile)) - -Logging - -The restful.SetLogger() method allows you to override the logger used by the package. By default restful -uses the standard library `log` package and logs to stdout. Different logging packages are supported as -long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your -preferred package is simple. - -Resources - -[project]: https://github.com/emicklei/go-restful - -[examples]: https://github.com/emicklei/go-restful/blob/master/examples - -[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/ - -[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape - -(c) 2012-2015, http://ernestmicklei.com. MIT License -*/ -package restful diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/entity_accessors.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/entity_accessors.go deleted file mode 100644 index 66dfc82..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/entity_accessors.go +++ /dev/null @@ -1,162 +0,0 @@ -package restful - -// Copyright 2015 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "encoding/xml" - "strings" - "sync" -) - -// EntityReaderWriter can read and write values using an encoding such as JSON,XML. -type EntityReaderWriter interface { - // Read a serialized version of the value from the request. - // The Request may have a decompressing reader. Depends on Content-Encoding. - Read(req *Request, v interface{}) error - - // Write a serialized version of the value on the response. - // The Response may have a compressing writer. Depends on Accept-Encoding. 
- // status should be a valid Http Status code - Write(resp *Response, status int, v interface{}) error -} - -// entityAccessRegistry is a singleton -var entityAccessRegistry = &entityReaderWriters{ - protection: new(sync.RWMutex), - accessors: map[string]EntityReaderWriter{}, -} - -// entityReaderWriters associates MIME to an EntityReaderWriter -type entityReaderWriters struct { - protection *sync.RWMutex - accessors map[string]EntityReaderWriter -} - -func init() { - RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON)) - RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML)) -} - -// RegisterEntityAccessor add/overrides the ReaderWriter for encoding content with this MIME type. -func RegisterEntityAccessor(mime string, erw EntityReaderWriter) { - entityAccessRegistry.protection.Lock() - defer entityAccessRegistry.protection.Unlock() - entityAccessRegistry.accessors[mime] = erw -} - -// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content. -// This package is already initialized with such an accessor using the MIME_JSON contentType. -func NewEntityAccessorJSON(contentType string) EntityReaderWriter { - return entityJSONAccess{ContentType: contentType} -} - -// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content. -// This package is already initialized with such an accessor using the MIME_XML contentType. -func NewEntityAccessorXML(contentType string) EntityReaderWriter { - return entityXMLAccess{ContentType: contentType} -} - -// accessorAt returns the registered ReaderWriter for this MIME type. -func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) { - r.protection.RLock() - defer r.protection.RUnlock() - er, ok := r.accessors[mime] - if !ok { - // retry with reverse lookup - // more expensive but we are in an exceptional situation anyway - for k, v := range r.accessors { - if strings.Contains(mime, k) { - return v, true - } - } - } - return er, ok -} - -// entityXMLAccess is a EntityReaderWriter for XML encoding -type entityXMLAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshalls the value from XML -func (e entityXMLAccess) Read(req *Request, v interface{}) error { - return xml.NewDecoder(req.Request.Body).Decode(v) -} - -// Write marshalls the value to JSON and set the Content-Type Header. -func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error { - return writeXML(resp, status, e.ContentType, v) -} - -// writeXML marshalls the value to JSON and set the Content-Type Header. 
-func writeXML(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := xml.MarshalIndent(v, " ", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write([]byte(xml.Header)) - if err != nil { - return err - } - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return xml.NewEncoder(resp).Encode(v) -} - -// entityJSONAccess is a EntityReaderWriter for JSON encoding -type entityJSONAccess struct { - // This is used for setting the Content-Type header when writing - ContentType string -} - -// Read unmarshalls the value from JSON -func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := NewDecoder(req.Request.Body) - decoder.UseNumber() - return decoder.Decode(v) -} - -// Write marshalls the value to JSON and set the Content-Type Header. -func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error { - return writeJSON(resp, status, e.ContentType, v) -} - -// write marshalls the value to JSON and set the Content-Type Header. -func writeJSON(resp *Response, status int, contentType string, v interface{}) error { - if v == nil { - resp.WriteHeader(status) - // do not write a nil representation - return nil - } - if resp.prettyPrint { - // pretty output must be created and written explicitly - output, err := MarshalIndent(v, "", " ") - if err != nil { - return err - } - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - _, err = resp.Write(output) - return err - } - // not-so-pretty - resp.Header().Set(HEADER_ContentType, contentType) - resp.WriteHeader(status) - return NewEncoder(resp).Encode(v) -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/filter.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/filter.go deleted file mode 100644 index c23bfb5..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/filter.go +++ /dev/null @@ -1,35 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. -type FilterChain struct { - Filters []FilterFunction // ordered list of FilterFunction - Index int // index into filters that is currently in progress - Target RouteFunction // function to call after passing all filters -} - -// ProcessFilter passes the request,response pair through the next of Filters. -// Each filter can decide to proceed to the next Filter or handle the Response itself. 
-func (f *FilterChain) ProcessFilter(request *Request, response *Response) { - if f.Index < len(f.Filters) { - f.Index++ - f.Filters[f.Index-1](request, response, f) - } else { - f.Target(request, response) - } -} - -// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction -type FilterFunction func(*Request, *Response, *FilterChain) - -// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching -// See examples/restful-no-cache-filter.go for usage -func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) { - resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1. - resp.Header().Set("Pragma", "no-cache") // HTTP 1.0. - resp.Header().Set("Expires", "0") // Proxies. - chain.ProcessFilter(req, resp) -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/json.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/json.go deleted file mode 100644 index 8711651..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/jsoniter.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/jsoniter.go deleted file mode 100644 index 11b8f8a..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/jsr311.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/jsr311.go deleted file mode 100644 index 3ede189..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/jsr311.go +++ /dev/null @@ -1,297 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "errors" - "fmt" - "net/http" - "sort" -) - -// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions) -// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html. -// RouterJSR311 implements the Router interface. -// Concept of locators is not implemented. -type RouterJSR311 struct{} - -// SelectRoute is part of the Router interface and returns the best match -// for the WebService and its Route for the given Request. 
-func (r RouterJSR311) SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) { - - // Identify the root resource class (WebService) - dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices) - if err != nil { - return nil, nil, NewError(http.StatusNotFound, "") - } - // Obtain the set of candidate methods (Routes) - routes := r.selectRoutes(dispatcher, finalMatch) - if len(routes) == 0 { - return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found") - } - - // Identify the method (Route) that will handle the request - route, ok := r.detectRoute(routes, httpRequest) - return dispatcher, route, ok -} - -// ExtractParameters is used to obtain the path parameters from the route using the same matching -// engine as the JSR 311 router. -func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string { - webServiceExpr := webService.pathExpr - webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath) - pathParameters := r.extractParams(webServiceExpr, webServiceMatches) - routeExpr := route.pathExpr - routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1]) - routeParams := r.extractParams(routeExpr, routeMatches) - for key, value := range routeParams { - pathParameters[key] = value - } - return pathParameters -} - -func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string { - params := map[string]string{} - for i := 1; i < len(matches); i++ { - if len(pathExpr.VarNames) >= i { - params[pathExpr.VarNames[i-1]] = matches[i] - } - } - return params -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) { - candidates := make([]*Route, 0, 8) - for i, each := range routes { - ok := true - for _, fn := range each.If { - if !fn(httpRequest) { - ok = false - break - } - } - if ok { - candidates = append(candidates, &routes[i]) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes)) - } - return nil, NewError(http.StatusNotFound, "404: Not Found") - } - - // http method - previous := candidates - candidates = candidates[:0] - for _, each := range previous { - if httpRequest.Method == each.Method { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) - } - return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") - } - - // content-type - contentType := httpRequest.Header.Get(HEADER_ContentType) - previous = candidates - candidates = candidates[:0] - for _, each := range previous { - if each.matchesContentType(contentType) { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType) - } - if httpRequest.ContentLength > 0 { - return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type") - } - } - - // accept - previous = candidates - candidates = candidates[:0] - accept := httpRequest.Header.Get(HEADER_Accept) - if len(accept) == 0 { - accept = "*/*" - } - for _, each := range previous { - if 
each.matchesAccept(accept) { - candidates = append(candidates, each) - } - } - if len(candidates) == 0 { - if trace { - traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) - } - return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") - } - // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil - return candidates[0], nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 -// n/m > n/* > */* -func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route { - // TODO - return &routes[0] -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2) -func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route { - filtered := &sortableRouteCandidates{} - for _, each := range dispatcher.Routes() { - pathExpr := each.pathExpr - matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder) - if matches != nil { - lastMatch := matches[len(matches)-1] - if len(lastMatch) == 0 || lastMatch == "/" { // do not include if value is neither empty nor ‘/’. - filtered.candidates = append(filtered.candidates, - routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount}) - } - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder) - } - return []Route{} - } - sort.Sort(sort.Reverse(filtered)) - - // select other routes from candidates whoes expression matches rmatch - matchingRoutes := []Route{filtered.candidates[0].route} - for c := 1; c < len(filtered.candidates); c++ { - each := filtered.candidates[c] - if each.route.pathExpr.Matcher.MatchString(pathRemainder) { - matchingRoutes = append(matchingRoutes, each.route) - } - } - return matchingRoutes -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1) -func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) { - filtered := &sortableDispatcherCandidates{} - for _, each := range dispatchers { - matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath) - if matches != nil { - filtered.candidates = append(filtered.candidates, - dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount}) - } - } - if len(filtered.candidates) == 0 { - if trace { - traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath) - } - return nil, "", errors.New("not found") - } - sort.Sort(sort.Reverse(filtered)) - return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil -} - -// Types and functions to support the sorting of Routes - -type routeCandidate struct { - route Route - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. 
not ‘([^ /]+?)’) -} - -func (r routeCandidate) expressionToMatch() string { - return r.route.pathExpr.Source -} - -func (r routeCandidate) String() string { - return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount) -} - -type sortableRouteCandidates struct { - candidates []routeCandidate -} - -func (rcs *sortableRouteCandidates) Len() int { - return len(rcs.candidates) -} -func (rcs *sortableRouteCandidates) Swap(i, j int) { - rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i] -} -func (rcs *sortableRouteCandidates) Less(i, j int) bool { - ci := rcs.candidates[i] - cj := rcs.candidates[j] - // primary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // secundary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // tertiary key - if ci.nonDefaultCount < cj.nonDefaultCount { - return true - } - if ci.nonDefaultCount > cj.nonDefaultCount { - return false - } - // quaternary key ("source" is interpreted as Path) - return ci.route.Path < cj.route.Path -} - -// Types and functions to support the sorting of Dispatchers - -type dispatcherCandidate struct { - dispatcher *WebService - finalMatch string - matchesCount int // the number of capturing groups - literalCount int // the number of literal characters (means those not resulting from template variable substitution) - nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ‘([^ /]+?)’) -} -type sortableDispatcherCandidates struct { - candidates []dispatcherCandidate -} - -func (dc *sortableDispatcherCandidates) Len() int { - return len(dc.candidates) -} -func (dc *sortableDispatcherCandidates) Swap(i, j int) { - dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i] -} -func (dc *sortableDispatcherCandidates) Less(i, j int) bool { - ci := dc.candidates[i] - cj := dc.candidates[j] - // primary key - if ci.matchesCount < cj.matchesCount { - return true - } - if ci.matchesCount > cj.matchesCount { - return false - } - // secundary key - if ci.literalCount < cj.literalCount { - return true - } - if ci.literalCount > cj.literalCount { - return false - } - // tertiary key - return ci.nonDefaultCount < cj.nonDefaultCount -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/log/log.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/log/log.go deleted file mode 100644 index 6cd44c7..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/log/log.go +++ /dev/null @@ -1,34 +0,0 @@ -package log - -import ( - stdlog "log" - "os" -) - -// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger -type StdLogger interface { - Print(v ...interface{}) - Printf(format string, v ...interface{}) -} - -var Logger StdLogger - -func init() { - // default Logger - SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile)) -} - -// SetLogger sets the logger for this package -func SetLogger(customLogger StdLogger) { - Logger = customLogger -} - -// Print delegates to the Logger -func Print(v ...interface{}) { - Logger.Print(v...) -} - -// Printf delegates to the Logger -func Printf(format string, v ...interface{}) { - Logger.Printf(format, v...) 
-} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/logger.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/logger.go deleted file mode 100644 index 6595df0..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/logger.go +++ /dev/null @@ -1,32 +0,0 @@ -package restful - -// Copyright 2014 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. -import ( - "github.com/emicklei/go-restful/log" -) - -var trace bool = false -var traceLogger log.StdLogger - -func init() { - traceLogger = log.Logger // use the package logger by default -} - -// TraceLogger enables detailed logging of Http request matching and filter invocation. Default no logger is set. -// You may call EnableTracing() directly to enable trace logging to the package-wide logger. -func TraceLogger(logger log.StdLogger) { - traceLogger = logger - EnableTracing(logger != nil) -} - -// SetLogger exposes the setter for the global logger on the top-level package -func SetLogger(customLogger log.StdLogger) { - log.SetLogger(customLogger) -} - -// EnableTracing can be used to Trace logging on and off. -func EnableTracing(enabled bool) { - trace = enabled -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/mime.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/mime.go deleted file mode 100644 index d7ea2b6..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/mime.go +++ /dev/null @@ -1,45 +0,0 @@ -package restful - -import ( - "strconv" - "strings" -) - -type mime struct { - media string - quality float64 -} - -// insertMime adds a mime to a list and keeps it sorted by quality. -func insertMime(l []mime, e mime) []mime { - for i, each := range l { - // if current mime has lower quality then insert before - if e.quality > each.quality { - left := append([]mime{}, l[0:i]...) - return append(append(left, e), l[i:]...) - } - } - return append(l, e) -} - -// sortedMimes returns a list of mime sorted (desc) by its specified quality. -func sortedMimes(accept string) (sorted []mime) { - for _, each := range strings.Split(accept, ",") { - typeAndQuality := strings.Split(strings.Trim(each, " "), ";") - if len(typeAndQuality) == 1 { - sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0}) - } else { - // take factor - parts := strings.Split(typeAndQuality[1], "=") - if len(parts) == 2 { - f, err := strconv.ParseFloat(parts[1], 64) - if err != nil { - traceLogger.Printf("unable to parse quality in %s, %v", each, err) - } else { - sorted = insertMime(sorted, mime{typeAndQuality[0], f}) - } - } - } - } - return -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/options_filter.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/options_filter.go deleted file mode 100644 index 5c1b342..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/options_filter.go +++ /dev/null @@ -1,34 +0,0 @@ -package restful - -import "strings" - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. -// As for any filter, you can also install it for a particular WebService within a Container. -// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). 
-func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) { - if "OPTIONS" != req.Request.Method { - chain.ProcessFilter(req, resp) - return - } - - archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders) - methods := strings.Join(c.computeAllowedMethods(req), ",") - origin := req.Request.Header.Get(HEADER_Origin) - - resp.AddHeader(HEADER_Allow, methods) - resp.AddHeader(HEADER_AccessControlAllowOrigin, origin) - resp.AddHeader(HEADER_AccessControlAllowHeaders, archs) - resp.AddHeader(HEADER_AccessControlAllowMethods, methods) -} - -// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method -// and provides the response with a set of allowed methods for the request URL Path. -// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS). -func OPTIONSFilter() FilterFunction { - return DefaultContainer.OPTIONSFilter -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/parameter.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/parameter.go deleted file mode 100644 index e879330..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/parameter.go +++ /dev/null @@ -1,143 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -const ( - // PathParameterKind = indicator of Request parameter type "path" - PathParameterKind = iota - - // QueryParameterKind = indicator of Request parameter type "query" - QueryParameterKind - - // BodyParameterKind = indicator of Request parameter type "body" - BodyParameterKind - - // HeaderParameterKind = indicator of Request parameter type "header" - HeaderParameterKind - - // FormParameterKind = indicator of Request parameter type "form" - FormParameterKind - - // CollectionFormatCSV comma separated values `foo,bar` - CollectionFormatCSV = CollectionFormat("csv") - - // CollectionFormatSSV space separated values `foo bar` - CollectionFormatSSV = CollectionFormat("ssv") - - // CollectionFormatTSV tab separated values `foo\tbar` - CollectionFormatTSV = CollectionFormat("tsv") - - // CollectionFormatPipes pipe separated values `foo|bar` - CollectionFormatPipes = CollectionFormat("pipes") - - // CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single - // instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters - CollectionFormatMulti = CollectionFormat("multi") -) - -type CollectionFormat string - -func (cf CollectionFormat) String() string { - return string(cf) -} - -// Parameter is for documententing the parameter used in a Http Request -// ParameterData kinds are Path,Query and Body -type Parameter struct { - data *ParameterData -} - -// ParameterData represents the state of a Parameter. -// It is made public to make it accessible to e.g. the Swagger package. 
-type ParameterData struct { - Name, Description, DataType, DataFormat string - Kind int - Required bool - AllowableValues map[string]string - AllowMultiple bool - DefaultValue string - CollectionFormat string -} - -// Data returns the state of the Parameter -func (p *Parameter) Data() ParameterData { - return *p.data -} - -// Kind returns the parameter type indicator (see const for valid values) -func (p *Parameter) Kind() int { - return p.data.Kind -} - -func (p *Parameter) bePath() *Parameter { - p.data.Kind = PathParameterKind - return p -} -func (p *Parameter) beQuery() *Parameter { - p.data.Kind = QueryParameterKind - return p -} -func (p *Parameter) beBody() *Parameter { - p.data.Kind = BodyParameterKind - return p -} - -func (p *Parameter) beHeader() *Parameter { - p.data.Kind = HeaderParameterKind - return p -} - -func (p *Parameter) beForm() *Parameter { - p.data.Kind = FormParameterKind - return p -} - -// Required sets the required field and returns the receiver -func (p *Parameter) Required(required bool) *Parameter { - p.data.Required = required - return p -} - -// AllowMultiple sets the allowMultiple field and returns the receiver -func (p *Parameter) AllowMultiple(multiple bool) *Parameter { - p.data.AllowMultiple = multiple - return p -} - -// AllowableValues sets the allowableValues field and returns the receiver -func (p *Parameter) AllowableValues(values map[string]string) *Parameter { - p.data.AllowableValues = values - return p -} - -// DataType sets the dataType field and returns the receiver -func (p *Parameter) DataType(typeName string) *Parameter { - p.data.DataType = typeName - return p -} - -// DataFormat sets the dataFormat field for Swagger UI -func (p *Parameter) DataFormat(formatName string) *Parameter { - p.data.DataFormat = formatName - return p -} - -// DefaultValue sets the default value field and returns the receiver -func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { - p.data.DefaultValue = stringRepresentation - return p -} - -// Description sets the description value field and returns the receiver -func (p *Parameter) Description(doc string) *Parameter { - p.data.Description = doc - return p -} - -// CollectionFormat sets the collection format for an array type -func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { - p.data.CollectionFormat = format.String() - return p -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/path_expression.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/path_expression.go deleted file mode 100644 index 95a9a25..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/path_expression.go +++ /dev/null @@ -1,74 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "bytes" - "fmt" - "regexp" - "strings" -) - -// PathExpression holds a compiled path expression (RegExp) needed to match against -// Http request paths and to extract path parameter values. 
-type pathExpression struct { - LiteralCount int // the number of literal characters (means those not resulting from template variable substitution) - VarNames []string // the names of parameters (enclosed by {}) in the path - VarCount int // the number of named parameters (enclosed by {}) in the path - Matcher *regexp.Regexp - Source string // Path as defined by the RouteBuilder - tokens []string -} - -// NewPathExpression creates a PathExpression from the input URL path. -// Returns an error if the path is invalid. -func newPathExpression(path string) (*pathExpression, error) { - expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path) - compiled, err := regexp.Compile(expression) - if err != nil { - return nil, err - } - return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil -} - -// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3 -func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) { - var buffer bytes.Buffer - buffer.WriteString("^") - //tokens = strings.Split(template, "/") - tokens = tokenizePath(template) - for _, each := range tokens { - if each == "" { - continue - } - buffer.WriteString("/") - if strings.HasPrefix(each, "{") { - // check for regular expression in variable - colon := strings.Index(each, ":") - var varName string - if colon != -1 { - // extract expression - varName = strings.TrimSpace(each[1:colon]) - paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1]) - if paramExpr == "*" { // special case - buffer.WriteString("(.*)") - } else { - buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache - } - } else { - // plain var - varName = strings.TrimSpace(each[1 : len(each)-1]) - buffer.WriteString("([^/]+?)") - } - varNames = append(varNames, varName) - varCount += 1 - } else { - literalCount += len(each) - encoded := each // TODO URI encode - buffer.WriteString(regexp.QuoteMeta(encoded)) - } - } - return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/path_processor.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/path_processor.go deleted file mode 100644 index 357c723..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/path_processor.go +++ /dev/null @@ -1,63 +0,0 @@ -package restful - -import ( - "bytes" - "strings" -) - -// Copyright 2018 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path. -// If a Router does not implement this interface then the default behaviour will be used. 
-type PathProcessor interface { - // ExtractParameters gets the path parameters defined in the route and webService from the urlPath - ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string -} - -type defaultPathProcessor struct{} - -// Extract the parameters from the request url path -func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string { - urlParts := tokenizePath(urlPath) - pathParameters := map[string]string{} - for i, key := range r.pathParts { - var value string - if i >= len(urlParts) { - value = "" - } else { - value = urlParts[i] - } - if strings.HasPrefix(key, "{") { // path-parameter - if colon := strings.Index(key, ":"); colon != -1 { - // extract by regex - regPart := key[colon+1 : len(key)-1] - keyPart := key[1:colon] - if regPart == "*" { - pathParameters[keyPart] = untokenizePath(i, urlParts) - break - } else { - pathParameters[keyPart] = value - } - } else { - // without enclosing {} - pathParameters[key[1:len(key)-1]] = value - } - } - } - return pathParameters -} - -// Untokenize back into an URL path using the slash separator -func untokenizePath(offset int, parts []string) string { - var buffer bytes.Buffer - for p := offset; p < len(parts); p++ { - buffer.WriteString(parts[p]) - // do not end - if p < len(parts)-1 { - buffer.WriteString("/") - } - } - return buffer.String() -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/request.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/request.go deleted file mode 100644 index a20730f..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/request.go +++ /dev/null @@ -1,118 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "compress/zlib" - "net/http" -) - -var defaultRequestContentType string - -// Request is a wrapper for a http Request that provides convenience methods -type Request struct { - Request *http.Request - pathParameters map[string]string - attributes map[string]interface{} // for storing request-scoped values - selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees -} - -func NewRequest(httpRequest *http.Request) *Request { - return &Request{ - Request: httpRequest, - pathParameters: map[string]string{}, - attributes: map[string]interface{}{}, - } // empty parameters, attributes -} - -// If ContentType is missing or */* is given then fall back to this type, otherwise -// a "Unable to unmarshal content of type:" response is returned. 
-// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) -func DefaultRequestContentType(mime string) { - defaultRequestContentType = mime -} - -// PathParameter accesses the Path parameter value by its name -func (r *Request) PathParameter(name string) string { - return r.pathParameters[name] -} - -// PathParameters accesses the Path parameter values -func (r *Request) PathParameters() map[string]string { - return r.pathParameters -} - -// QueryParameter returns the (first) Query parameter value by its name -func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) -} - -// QueryParameters returns the all the query parameters values by name -func (r *Request) QueryParameters(name string) []string { - return r.Request.URL.Query()[name] -} - -// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error. -func (r *Request) BodyParameter(name string) (string, error) { - err := r.Request.ParseForm() - if err != nil { - return "", err - } - return r.Request.PostFormValue(name), nil -} - -// HeaderParameter returns the HTTP Header value of a Header name or empty if missing -func (r *Request) HeaderParameter(name string) string { - return r.Request.Header.Get(name) -} - -// ReadEntity checks the Accept header and reads the content into the entityPointer. -func (r *Request) ReadEntity(entityPointer interface{}) (err error) { - contentType := r.Request.Header.Get(HEADER_ContentType) - contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) - - // check if the request body needs decompression - if ENCODING_GZIP == contentEncoding { - gzipReader := currentCompressorProvider.AcquireGzipReader() - defer currentCompressorProvider.ReleaseGzipReader(gzipReader) - gzipReader.Reset(r.Request.Body) - r.Request.Body = gzipReader - } else if ENCODING_DEFLATE == contentEncoding { - zlibReader, err := zlib.NewReader(r.Request.Body) - if err != nil { - return err - } - r.Request.Body = zlibReader - } - - // lookup the EntityReader, use defaultRequestContentType if needed and provided - entityReader, ok := entityAccessRegistry.accessorAt(contentType) - if !ok { - if len(defaultRequestContentType) != 0 { - entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType) - } - if !ok { - return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) - } - } - return entityReader.Read(r, entityPointer) -} - -// SetAttribute adds or replaces the attribute with the given value. -func (r *Request) SetAttribute(name string, value interface{}) { - r.attributes[name] = value -} - -// Attribute returns the value associated to the given name. Returns nil if absent. -func (r Request) Attribute(name string) interface{} { - return r.attributes[name] -} - -// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees -func (r Request) SelectedRoutePath() string { - return r.selectedRoutePath -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/response.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/response.go deleted file mode 100644 index 4d987d1..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/response.go +++ /dev/null @@ -1,250 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import ( - "bufio" - "errors" - "net" - "net/http" -) - -// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime) -var DefaultResponseMimeType string - -//PrettyPrintResponses controls the indentation feature of XML and JSON serialization -var PrettyPrintResponses = true - -// Response is a wrapper on the actual http ResponseWriter -// It provides several convenience methods to prepare and write response content. -type Response struct { - http.ResponseWriter - requestAccept string // mime-type what the Http Request says it wants to receive - routeProduces []string // mime-types what the Route says it can produce - statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200) - contentLength int // number of bytes written for the response body - prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. - err error // err property is kept when WriteError is called - hijacker http.Hijacker // if underlying ResponseWriter supports it -} - -// NewResponse creates a new response based on a http ResponseWriter. -func NewResponse(httpWriter http.ResponseWriter) *Response { - hijacker, _ := httpWriter.(http.Hijacker) - return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker} -} - -// DefaultResponseContentType set a default. -// If Accept header matching fails, fall back to this type. -// Valid values are restful.MIME_JSON and restful.MIME_XML -// Example: -// restful.DefaultResponseContentType(restful.MIME_JSON) -func DefaultResponseContentType(mime string) { - DefaultResponseMimeType = mime -} - -// InternalServerError writes the StatusInternalServerError header. -// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason) -func (r Response) InternalServerError() Response { - r.WriteHeader(http.StatusInternalServerError) - return r -} - -// Hijack implements the http.Hijacker interface. This expands -// the Response to fulfill http.Hijacker if the underlying -// http.ResponseWriter supports it. -func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if r.hijacker == nil { - return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter") - } - return r.hijacker.Hijack() -} - -// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output. -func (r *Response) PrettyPrint(bePretty bool) { - r.prettyPrint = bePretty -} - -// AddHeader is a shortcut for .Header().Add(header,value) -func (r Response) AddHeader(header string, value string) Response { - r.Header().Add(header, value) - return r -} - -// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing. -func (r *Response) SetRequestAccepts(mime string) { - r.requestAccept = mime -} - -// EntityWriter returns the registered EntityWriter that the entity (requested resource) -// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say. -// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable. 
-func (r *Response) EntityWriter() (EntityReaderWriter, bool) { - sorted := sortedMimes(r.requestAccept) - for _, eachAccept := range sorted { - for _, eachProduce := range r.routeProduces { - if eachProduce == eachAccept.media { - if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok { - return w, true - } - } - } - if eachAccept.media == "*/*" { - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - } - } - // if requestAccept is empty - writer, ok := entityAccessRegistry.accessorAt(r.requestAccept) - if !ok { - // if not registered then fallback to the defaults (if set) - if DefaultResponseMimeType == MIME_JSON { - return entityAccessRegistry.accessorAt(MIME_JSON) - } - if DefaultResponseMimeType == MIME_XML { - return entityAccessRegistry.accessorAt(MIME_XML) - } - // Fallback to whatever the route says it can produce. - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - for _, each := range r.routeProduces { - if w, ok := entityAccessRegistry.accessorAt(each); ok { - return w, true - } - } - if trace { - traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept) - } - } - return writer, ok -} - -// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200) -func (r *Response) WriteEntity(value interface{}) error { - return r.WriteHeaderAndEntity(http.StatusOK, value) -} - -// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters. -// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces. -// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header. -// If the value is nil then no response is send except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead. -// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written. -// Current implementation ignores any q-parameters in the Accept Header. -// Returns an error if the value could not be written on the response. -func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error { - writer, ok := r.EntityWriter() - if !ok { - r.WriteHeader(http.StatusNotAcceptable) - return nil - } - return writer.Write(r, status, value) -} - -// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteAsXml(value interface{}) error { - return writeXML(r, http.StatusOK, MIME_XML, value) -} - -// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value) -// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndXml(status int, value interface{}) error { - return writeXML(r, status, MIME_XML, value) -} - -// WriteAsJson is a convenience method for writing a value in json. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. 
-func (r *Response) WriteAsJson(value interface{}) error { - return writeJSON(r, http.StatusOK, MIME_JSON, value) -} - -// WriteJson is a convenience method for writing a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteJson(value interface{}, contentType string) error { - return writeJSON(r, http.StatusOK, contentType, value) -} - -// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type. -// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter. -func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error { - return writeJSON(r, status, contentType, value) -} - -// WriteError write the http status and the error string on the response. -func (r *Response) WriteError(httpStatus int, err error) error { - r.err = err - return r.WriteErrorString(httpStatus, err.Error()) -} - -// WriteServiceError is a convenience method for a responding with a status and a ServiceError -func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error { - r.err = err - return r.WriteHeaderAndEntity(httpStatus, err) -} - -// WriteErrorString is a convenience method for an error status with the actual error -func (r *Response) WriteErrorString(httpStatus int, errorReason string) error { - if r.err == nil { - // if not called from WriteError - r.err = errors.New(errorReason) - } - r.WriteHeader(httpStatus) - if _, err := r.Write([]byte(errorReason)); err != nil { - return err - } - return nil -} - -// Flush implements http.Flusher interface, which sends any buffered data to the client. -func (r *Response) Flush() { - if f, ok := r.ResponseWriter.(http.Flusher); ok { - f.Flush() - } else if trace { - traceLogger.Printf("ResponseWriter %v doesn't support Flush", r) - } -} - -// WriteHeader is overridden to remember the Status Code that has been written. -// Changes to the Header of the response have no effect after this. -func (r *Response) WriteHeader(httpStatus int) { - r.statusCode = httpStatus - r.ResponseWriter.WriteHeader(httpStatus) -} - -// StatusCode returns the code that has been written using WriteHeader. -func (r Response) StatusCode() int { - if 0 == r.statusCode { - // no status code has been written yet; assume OK - return http.StatusOK - } - return r.statusCode -} - -// Write writes the data to the connection as part of an HTTP reply. -// Write is part of http.ResponseWriter interface. -func (r *Response) Write(bytes []byte) (int, error) { - written, err := r.ResponseWriter.Write(bytes) - r.contentLength += written - return written, err -} - -// ContentLength returns the number of bytes written for the response content. -// Note that this value is only correct if all data is written through the Response using its Write* methods. -// Data written directly using the underlying http.ResponseWriter is not accounted for. 
-func (r Response) ContentLength() int { - return r.contentLength -} - -// CloseNotify is part of http.CloseNotifier interface -func (r Response) CloseNotify() <-chan bool { - return r.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// Error returns the err created by WriteError -func (r Response) Error() error { - return r.err -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/route.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/route.go deleted file mode 100644 index 6d15dbf..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/route.go +++ /dev/null @@ -1,170 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" - "strings" -) - -// RouteFunction declares the signature of a function that can be bound to a Route. -type RouteFunction func(*Request, *Response) - -// RouteSelectionConditionFunction declares the signature of a function that -// can be used to add extra conditional logic when selecting whether the route -// matches the HTTP request. -type RouteSelectionConditionFunction func(httpRequest *http.Request) bool - -// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. -type Route struct { - Method string - Produces []string - Consumes []string - Path string // webservice root path + described path - Function RouteFunction - Filters []FilterFunction - If []RouteSelectionConditionFunction - - // cached values for dispatching - relativePath string - pathParts []string - pathExpr *pathExpression // cached compilation of relativePath as RegExp - - // documentation - Doc string - Notes string - Operation string - ParameterDocs []*Parameter - ResponseErrors map[int]ResponseError - DefaultResponse *ResponseError - ReadSample, WriteSample interface{} // structs that model an example request or response payload - - // Extra information used to store custom information about the route. - Metadata map[string]interface{} - - // marks a route as deprecated - Deprecated bool - - //Overrides the container.contentEncodingEnabled - contentEncodingEnabled *bool -} - -// Initialize for Route -func (r *Route) postBuild() { - r.pathParts = tokenizePath(r.Path) -} - -// Create Request and Response from their http versions -func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { - wrappedRequest := NewRequest(httpRequest) - wrappedRequest.pathParameters = pathParams - wrappedRequest.selectedRoutePath = r.Path - wrappedResponse := NewResponse(httpWriter) - wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) - wrappedResponse.routeProduces = r.Produces - return wrappedRequest, wrappedResponse -} - -// dispatchWithFilters call the function after passing through its own filters -func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) { - if len(r.Filters) > 0 { - chain := FilterChain{Filters: r.Filters, Target: r.Function} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // unfiltered - r.Function(wrappedRequest, wrappedResponse) - } -} - -func stringTrimSpaceCutset(r rune) bool { - return r == ' ' -} - -// Return whether the mimeType matches to what this Route can produce. 
-func (r Route) matchesAccept(mimeTypesWithQuality string) bool { - remaining := mimeTypesWithQuality - for { - var mimeType string - if end := strings.Index(remaining, ","); end == -1 { - mimeType, remaining = remaining, "" - } else { - mimeType, remaining = remaining[:end], remaining[end+1:] - } - if quality := strings.Index(mimeType, ";"); quality != -1 { - mimeType = mimeType[:quality] - } - mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) - if mimeType == "*/*" { - return true - } - for _, producibleType := range r.Produces { - if producibleType == "*/*" || producibleType == mimeType { - return true - } - } - if len(remaining) == 0 { - return false - } - } -} - -// Return whether this Route can consume content with a type specified by mimeTypes (can be empty). -func (r Route) matchesContentType(mimeTypes string) bool { - - if len(r.Consumes) == 0 { - // did not specify what it can consume ; any media type (“*/*”) is assumed - return true - } - - if len(mimeTypes) == 0 { - // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type - m := r.Method - if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { - return true - } - // proceed with default - mimeTypes = MIME_OCTET - } - - remaining := mimeTypes - for { - var mimeType string - if end := strings.Index(remaining, ","); end == -1 { - mimeType, remaining = remaining, "" - } else { - mimeType, remaining = remaining[:end], remaining[end+1:] - } - if quality := strings.Index(mimeType, ";"); quality != -1 { - mimeType = mimeType[:quality] - } - mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset) - for _, consumeableType := range r.Consumes { - if consumeableType == "*/*" || consumeableType == mimeType { - return true - } - } - if len(remaining) == 0 { - return false - } - } -} - -// Tokenize an URL path using the slash separator ; the result does not have empty tokens -func tokenizePath(path string) []string { - if "/" == path { - return nil - } - return strings.Split(strings.Trim(path, "/"), "/") -} - -// for debugging -func (r Route) String() string { - return r.Method + " " + r.Path -} - -// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. -func (r Route) EnableContentEncoding(enabled bool) { - r.contentEncodingEnabled = &enabled -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/route_builder.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/route_builder.go deleted file mode 100644 index 8dfdf93..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/route_builder.go +++ /dev/null @@ -1,317 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "fmt" - "os" - "reflect" - "runtime" - "strings" - "sync/atomic" - - "github.com/emicklei/go-restful/log" -) - -// RouteBuilder is a helper to construct Routes. 
-type RouteBuilder struct { - rootPath string - currentPath string - produces []string - consumes []string - httpMethod string // required - function RouteFunction // required - filters []FilterFunction - conditions []RouteSelectionConditionFunction - - typeNameHandleFunc TypeNameHandleFunction // required - - // documentation - doc string - notes string - operation string - readSample, writeSample interface{} - parameters []*Parameter - errorMap map[int]ResponseError - defaultResponse *ResponseError - metadata map[string]interface{} - deprecated bool -} - -// Do evaluates each argument with the RouteBuilder itself. -// This allows you to follow DRY principles without breaking the fluent programming style. -// Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) -// -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } -func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { - for _, each := range oneArgBlocks { - each(b) - } - return b -} - -// To bind the route to a function. -// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required. -func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder { - b.function = function - return b -} - -// Method specifies what HTTP method to match. Required. -func (b *RouteBuilder) Method(method string) *RouteBuilder { - b.httpMethod = method - return b -} - -// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header. -func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder { - b.produces = mimeTypes - return b -} - -// Consumes specifies what MIME types can be consumes ; the Accept Http header must matched any of these -func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder { - b.consumes = mimeTypes - return b -} - -// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/". -func (b *RouteBuilder) Path(subPath string) *RouteBuilder { - b.currentPath = subPath - return b -} - -// Doc tells what this route is all about. Optional. -func (b *RouteBuilder) Doc(documentation string) *RouteBuilder { - b.doc = documentation - return b -} - -// Notes is a verbose explanation of the operation behavior. Optional. -func (b *RouteBuilder) Notes(notes string) *RouteBuilder { - b.notes = notes - return b -} - -// Reads tells what resource type will be read from the request payload. Optional. -// A parameter of type "body" is added ,required is set to true and the dataType is set to the qualified name of the sample's type. -func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder { - fn := b.typeNameHandleFunc - if fn == nil { - fn = reflectTypeName - } - typeAsName := fn(sample) - description := "" - if len(optionalDescription) > 0 { - description = optionalDescription[0] - } - b.readSample = sample - bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}} - bodyParameter.beBody() - bodyParameter.Required(true) - bodyParameter.DataType(typeAsName) - b.Param(bodyParameter) - return b -} - -// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if not. -// Use this to modify or extend information for the Parameter (through its Data()). 
-func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) { - for _, each := range b.parameters { - if each.Data().Name == name { - return each - } - } - return p -} - -// Writes tells what resource type will be written as the response payload. Optional. -func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder { - b.writeSample = sample - return b -} - -// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates). -func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder { - if b.parameters == nil { - b.parameters = []*Parameter{} - } - b.parameters = append(b.parameters, parameter) - return b -} - -// Operation allows you to document what the actual method/function call is of the Route. -// Unless called, the operation name is derived from the RouteFunction set using To(..). -func (b *RouteBuilder) Operation(name string) *RouteBuilder { - b.operation = name - return b -} - -// ReturnsError is deprecated, use Returns instead. -func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder { - log.Print("ReturnsError is deprecated, use Returns instead.") - return b.Returns(code, message, model) -} - -// Returns allows you to document what responses (errors or regular) can be expected. -// The model parameter is optional ; either pass a struct instance or use nil if not applicable. -func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder { - err := ResponseError{ - Code: code, - Message: message, - Model: model, - IsDefault: false, // this field is deprecated, use default response instead. - } - // lazy init because there is no NewRouteBuilder (yet) - if b.errorMap == nil { - b.errorMap = map[int]ResponseError{} - } - b.errorMap[code] = err - return b -} - -// DefaultReturns is a special Returns call that sets the default of the response. -func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { - b.defaultResponse = &ResponseError{ - Message: message, - Model: model, - } - return b -} - -// Metadata adds or updates a key=value pair to the metadata map. -func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { - if b.metadata == nil { - b.metadata = map[string]interface{}{} - } - b.metadata[key] = value - return b -} - -// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use -func (b *RouteBuilder) Deprecate() *RouteBuilder { - b.deprecated = true - return b -} - -// ResponseError represents a response; not necessarily an error. -type ResponseError struct { - Code int - Message string - Model interface{} - IsDefault bool -} - -func (b *RouteBuilder) servicePath(path string) *RouteBuilder { - b.rootPath = path - return b -} - -// Filter appends a FilterFunction to the end of filters for this Route to build. -func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder { - b.filters = append(b.filters, filter) - return b -} - -// If sets a condition function that controls matching the Route based on custom logic. -// The condition function is provided the HTTP request and should return true if the route -// should be considered. -// -// Efficiency note: the condition function is called before checking the method, produces, and -// consumes criteria, so that the correct HTTP status code can be returned. 
-// -// Lifecycle note: no filter functions have been called prior to calling the condition function, -// so the condition function should not depend on any context that might be set up by container -// or route filters. -func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder { - b.conditions = append(b.conditions, condition) - return b -} - -// If no specific Route path then set to rootPath -// If no specific Produces then set to rootProduces -// If no specific Consumes then set to rootConsumes -func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) { - if len(b.produces) == 0 { - b.produces = rootProduces - } - if len(b.consumes) == 0 { - b.consumes = rootConsumes - } -} - -// typeNameHandler sets the function that will convert types to strings in the parameter -// and model definitions. -func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder { - b.typeNameHandleFunc = handler - return b -} - -// Build creates a new Route using the specification details collected by the RouteBuilder -func (b *RouteBuilder) Build() Route { - pathExpr, err := newPathExpression(b.currentPath) - if err != nil { - log.Printf("Invalid path:%s because:%v", b.currentPath, err) - os.Exit(1) - } - if b.function == nil { - log.Printf("No function specified for route:" + b.currentPath) - os.Exit(1) - } - operationName := b.operation - if len(operationName) == 0 && b.function != nil { - // extract from definition - operationName = nameOfFunction(b.function) - } - route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - If: b.conditions, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - DefaultResponse: b.defaultResponse, - ReadSample: b.readSample, - WriteSample: b.writeSample, - Metadata: b.metadata, - Deprecated: b.deprecated} - route.postBuild() - return route -} - -func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") -} - -var anonymousFuncCount int32 - -// nameOfFunction returns the short name of the function f for documentation. -// It uses a runtime feature for debugging ; its value may change for later Go versions. -func nameOfFunction(f interface{}) string { - fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) - tokenized := strings.Split(fun.Name(), ".") - last := tokenized[len(tokenized)-1] - last = strings.TrimSuffix(last, ")·fm") // < Go 1.5 - last = strings.TrimSuffix(last, ")-fm") // Go 1.5 - last = strings.TrimSuffix(last, "·fm") // < Go 1.5 - last = strings.TrimSuffix(last, "-fm") // Go 1.5 - if last == "func1" { // this could mean conflicts in API docs - val := atomic.AddInt32(&anonymousFuncCount, 1) - last = "func" + fmt.Sprintf("%d", val) - atomic.StoreInt32(&anonymousFuncCount, val) - } - return last -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/router.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/router.go deleted file mode 100644 index 19078af..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/router.go +++ /dev/null @@ -1,20 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -import "net/http" - -// A RouteSelector finds the best matching Route given the input HTTP Request -// RouteSelectors can optionally also implement the PathProcessor interface to also calculate the -// path parameters after the route has been selected. -type RouteSelector interface { - - // SelectRoute finds a Route given the input HTTP Request and a list of WebServices. - // It returns a selected Route and its containing WebService or an error indicating - // a problem. - SelectRoute( - webServices []*WebService, - httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/service_error.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/service_error.go deleted file mode 100644 index 62d1108..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/service_error.go +++ /dev/null @@ -1,23 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import "fmt" - -// ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. -type ServiceError struct { - Code int - Message string -} - -// NewError returns a ServiceError using the code and reason -func NewError(code int, message string) ServiceError { - return ServiceError{Code: code, Message: message} -} - -// Error returns a text representation of the service error -func (s ServiceError) Error() string { - return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/web_service.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/web_service.go deleted file mode 100644 index 77ba9a8..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/web_service.go +++ /dev/null @@ -1,290 +0,0 @@ -package restful - -import ( - "errors" - "os" - "reflect" - "sync" - - "github.com/emicklei/go-restful/log" -) - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -// WebService holds a collection of Route values that bind a Http Method + URL Path to a function. -type WebService struct { - rootPath string - pathExpr *pathExpression // cached compilation of rootPath as RegExp - routes []Route - produces []string - consumes []string - pathParameters []*Parameter - filters []FilterFunction - documentation string - apiVersion string - - typeNameHandleFunc TypeNameHandleFunction - - dynamicRoutes bool - - // protects 'routes' if dynamic routes are enabled - routesLock sync.RWMutex -} - -func (w *WebService) SetDynamicRoutes(enable bool) { - w.dynamicRoutes = enable -} - -// TypeNameHandleFunction declares functions that can handle translating the name of a sample object -// into the restful documentation for the service. -type TypeNameHandleFunction func(sample interface{}) string - -// TypeNameHandler sets the function that will convert types to strings in the parameter -// and model definitions. If not set, the web service will invoke -// reflect.TypeOf(object).String(). -func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService { - w.typeNameHandleFunc = handler - return w -} - -// reflectTypeName is the default TypeNameHandleFunction and for a given object -// returns the name that Go identifies it with (e.g. 
"string" or "v1.Object") via -// the reflection API. -func reflectTypeName(sample interface{}) string { - return reflect.TypeOf(sample).String() -} - -// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it. -func (w *WebService) compilePathExpression() { - compiled, err := newPathExpression(w.rootPath) - if err != nil { - log.Printf("invalid path:%s because:%v", w.rootPath, err) - os.Exit(1) - } - w.pathExpr = compiled -} - -// ApiVersion sets the API version for documentation purposes. -func (w *WebService) ApiVersion(apiVersion string) *WebService { - w.apiVersion = apiVersion - return w -} - -// Version returns the API version for documentation purposes. -func (w *WebService) Version() string { return w.apiVersion } - -// Path specifies the root URL template path of the WebService. -// All Routes will be relative to this path. -func (w *WebService) Path(root string) *WebService { - w.rootPath = root - if len(w.rootPath) == 0 { - w.rootPath = "/" - } - w.compilePathExpression() - return w -} - -// Param adds a PathParameter to document parameters used in the root path. -func (w *WebService) Param(parameter *Parameter) *WebService { - if w.pathParameters == nil { - w.pathParameters = []*Parameter{} - } - w.pathParameters = append(w.pathParameters, parameter) - return w -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. -func (w *WebService) PathParameter(name, description string) *Parameter { - return PathParameter(name, description) -} - -// PathParameter creates a new Parameter of kind Path for documentation purposes. -// It is initialized as required with string as its DataType. -func PathParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}} - p.bePath() - return p -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func (w *WebService) QueryParameter(name, description string) *Parameter { - return QueryParameter(name, description) -} - -// QueryParameter creates a new Parameter of kind Query for documentation purposes. -// It is initialized as not required with string as its DataType. -func QueryParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}} - p.beQuery() - return p -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func (w *WebService) BodyParameter(name, description string) *Parameter { - return BodyParameter(name, description) -} - -// BodyParameter creates a new Parameter of kind Body for documentation purposes. -// It is initialized as required without a DataType. -func BodyParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}} - p.beBody() - return p -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. 
-func (w *WebService) HeaderParameter(name, description string) *Parameter { - return HeaderParameter(name, description) -} - -// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes. -// It is initialized as not required with string as its DataType. -func HeaderParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beHeader() - return p -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. -func (w *WebService) FormParameter(name, description string) *Parameter { - return FormParameter(name, description) -} - -// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes. -// It is initialized as required with string as its DataType. -func FormParameter(name, description string) *Parameter { - p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} - p.beForm() - return p -} - -// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. -func (w *WebService) Route(builder *RouteBuilder) *WebService { - w.routesLock.Lock() - defer w.routesLock.Unlock() - builder.copyDefaults(w.produces, w.consumes) - w.routes = append(w.routes, builder.Build()) - return w -} - -// RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' -func (w *WebService) RemoveRoute(path, method string) error { - if !w.dynamicRoutes { - return errors.New("dynamic routes are not enabled.") - } - w.routesLock.Lock() - defer w.routesLock.Unlock() - newRoutes := make([]Route, (len(w.routes) - 1)) - current := 0 - for ix := range w.routes { - if w.routes[ix].Method == method && w.routes[ix].Path == path { - continue - } - newRoutes[current] = w.routes[ix] - current = current + 1 - } - w.routes = newRoutes - return nil -} - -// Method creates a new RouteBuilder and initialize its http method -func (w *WebService) Method(httpMethod string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod) -} - -// Produces specifies that this WebService can produce one or more MIME types. -// Http requests must have one of these values set for the Accept header. -func (w *WebService) Produces(contentTypes ...string) *WebService { - w.produces = contentTypes - return w -} - -// Consumes specifies that this WebService can consume one or more MIME types. -// Http requests must have one of these values set for the Content-Type header. -func (w *WebService) Consumes(accepts ...string) *WebService { - w.consumes = accepts - return w -} - -// Routes returns the Routes associated with this WebService -func (w *WebService) Routes() []Route { - if !w.dynamicRoutes { - return w.routes - } - // Make a copy of the array to prevent concurrency problems - w.routesLock.RLock() - defer w.routesLock.RUnlock() - result := make([]Route, len(w.routes)) - for ix := range w.routes { - result[ix] = w.routes[ix] - } - return result -} - -// RootPath returns the RootPath associated with this WebService. 
Default "/" -func (w *WebService) RootPath() string { - return w.rootPath -} - -// PathParameters return the path parameter names for (shared among its Routes) -func (w *WebService) PathParameters() []*Parameter { - return w.pathParameters -} - -// Filter adds a filter function to the chain of filters applicable to all its Routes -func (w *WebService) Filter(filter FilterFunction) *WebService { - w.filters = append(w.filters, filter) - return w -} - -// Doc is used to set the documentation of this service. -func (w *WebService) Doc(plainText string) *WebService { - w.documentation = plainText - return w -} - -// Documentation returns it. -func (w *WebService) Documentation() string { - return w.documentation -} - -/* - Convenience methods -*/ - -// HEAD is a shortcut for .Method("HEAD").Path(subPath) -func (w *WebService) HEAD(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath) -} - -// GET is a shortcut for .Method("GET").Path(subPath) -func (w *WebService) GET(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath) -} - -// POST is a shortcut for .Method("POST").Path(subPath) -func (w *WebService) POST(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath) -} - -// PUT is a shortcut for .Method("PUT").Path(subPath) -func (w *WebService) PUT(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath) -} - -// PATCH is a shortcut for .Method("PATCH").Path(subPath) -func (w *WebService) PATCH(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath) -} - -// DELETE is a shortcut for .Method("DELETE").Path(subPath) -func (w *WebService) DELETE(subPath string) *RouteBuilder { - return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath) -} diff --git a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/web_service_container.go b/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/web_service_container.go deleted file mode 100644 index c9d31b0..0000000 --- a/cmd/bpa-operator/vendor/github.com/emicklei/go-restful/web_service_container.go +++ /dev/null @@ -1,39 +0,0 @@ -package restful - -// Copyright 2013 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. - -import ( - "net/http" -) - -// DefaultContainer is a restful.Container that uses http.DefaultServeMux -var DefaultContainer *Container - -func init() { - DefaultContainer = NewContainer() - DefaultContainer.ServeMux = http.DefaultServeMux -} - -// If set the true then panics will not be caught to return HTTP 500. -// In that case, Route functions are responsible for handling any error situation. -// Default value is false = recover from panics. This has performance implications. -// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true) -var DoNotRecover = false - -// Add registers a new WebService add it to the DefaultContainer. -func Add(service *WebService) { - DefaultContainer.Add(service) -} - -// Filter appends a container FilterFunction from the DefaultContainer. 
-// These are called before dispatching a http.Request to a WebService. -func Filter(filter FilterFunction) { - DefaultContainer.Filter(filter) -} - -// RegisteredWebServices returns the collections of WebServices from the DefaultContainer -func RegisteredWebServices() []*WebService { - return DefaultContainer.RegisteredWebServices() -} diff --git a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/.travis.yml b/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/.travis.yml deleted file mode 100644 index 2092c72..0000000 --- a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go - -go: - - 1.8 - - 1.7 - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - - go get github.com/jessevdk/go-flags - -script: - - go get - - go test -cover ./... - -notifications: - email: false diff --git a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/LICENSE b/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index 0eb9b72..0000000 --- a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/README.md b/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index a711d79..0000000 --- a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,292 +0,0 @@ -# JSON-Patch -`jsonpatch` is a library which provides functionallity for both applying -[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as -well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). 
- -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) -[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) - -# Get It! - -**Latest and greatest**: -```bash -go get -u github.com/evanphx/json-patch -``` - -**Stable Versions**: -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` - -(previous versions below `v3` are unavailable) - -# Use It! -* [Create and apply a merge patch](#create-and-apply-a-merge-patch) -* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) -* [Comparing JSON documents](#comparing-json-documents) -* [Combine merge patches](#combine-merge-patches) - - -# Configuration - -There is a single global configuration variable `jsonpatch.SupportNegativeIndices'. This -defaults to `true` and enables the non-standard practice of allowing negative indices -to mean indices starting at the end of an array. This functionality can be disabled -by setting `jsonpatch.SupportNegativeIndices = false`. - -## Create and apply a merge patch -Given both an original JSON document and a modified JSON document, you can create -a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - -It can describe the changes needed to convert from the original to the -modified JSON document. - -Once you have a merge patch, you can apply it to other JSON documents using the -`jsonpatch.MergePatch(document, patch)` function. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - // Let's create a merge patch from these two documents... - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - target := []byte(`{"name": "Jane", "age": 24}`) - - patch, err := jsonpatch.CreateMergePatch(original, target) - if err != nil { - panic(err) - } - - // Now lets apply the patch against a different JSON document... - - alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) - modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) - - fmt.Printf("patch document: %s\n", patch) - fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -patch document: {"height":null,"name":"Jane"} -updated tina doc: {"age":28,"name":"Jane"} -``` - -## Create and apply a JSON Patch -You can create patch objects using `DecodePatch([]byte)`, which can then -be applied against JSON documents. - -The following is an example of creating a patch from two operations, and -applying it against a JSON document. 
- -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - patchJSON := []byte(`[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} - ]`) - - patch, err := jsonpatch.DecodePatch(patchJSON) - if err != nil { - panic(err) - } - - modified, err := patch.Apply(original) - if err != nil { - panic(err) - } - - fmt.Printf("Original document: %s\n", original) - fmt.Printf("Modified document: %s\n", modified) -} -``` - -When ran, you get the following output: - -```bash -$ go run main.go -Original document: {"name": "John", "age": 24, "height": 3.21} -Modified document: {"age":24,"name":"Jane"} -``` - -## Comparing JSON documents -Due to potential whitespace and ordering differences, one cannot simply compare -JSON strings or byte-arrays directly. - -As such, you can instead use `jsonpatch.Equal(document1, document2)` to -determine if two JSON documents are _structurally_ equal. This ignores -whitespace differences, and key-value ordering. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - similar := []byte(` - { - "age": 24, - "height": 3.21, - "name": "John" - } - `) - different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) - - if jsonpatch.Equal(original, similar) { - fmt.Println(`"original" is structurally equal to "similar"`) - } - - if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "similar"`) - } -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -"original" is structurally equal to "similar" -"original" is _not_ structurally equal to "similar" -``` - -## Combine merge patches -Given two JSON merge patch documents, it is possible to combine them into a -single merge patch which can describe both set of changes. - -The resulting merge patch can be used such that applying it results in a -document structurally similar as merging each merge patch to the document -in succession. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - - nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) - ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) - - // Let's combine these merge patch documents... - combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply each patch individual against the original document - withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) - if err != nil { - panic(err) - } - - withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply the combined patch against the original document - - withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) - if err != nil { - panic(err) - } - - // Do both result in the same thing? They should! - if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { - fmt.Println("Both JSON documents are structurally the same!") - } - - fmt.Printf("combined merge patch: %s", combinedPatch) -} -``` - -When ran, you get the following output: -```bash -$ go run main.go -Both JSON documents are structurally the same! 
-combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} -``` - -# CLI for comparing JSON documents -You can install the commandline program `json-patch`. - -This program can take multiple JSON patch documents as arguments, -and fed a JSON document from `stdin`. It will apply the patch(es) against -the document and output the modified doc. - -**patch.1.json** -```json -[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} -] -``` - -**patch.2.json** -```json -[ - {"op": "add", "path": "/address", "value": "123 Main St"}, - {"op": "replace", "path": "/age", "value": "21"} -] -``` - -**document.json** -```json -{ - "name": "John", - "age": 24, - "height": 3.21 -} -``` - -You can then run: - -```bash -$ go install github.com/evanphx/json-patch/cmd/json-patch -$ cat document.json | json-patch -p patch.1.json -p patch.2.json -{"address":"123 Main St","age":"21","name":"Jane"} -``` - -# Help It! -Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues) -or [create a PR](https://github.com/evanphx/json-patch/compare). - - -Before creating a pull request, we'd ask that you make sure tests are passing -and that you have added new tests when applicable. - -Contributors can run tests using: - -```bash -go test -cover ./... -``` - -Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/merge.go b/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/merge.go deleted file mode 100644 index 6806c4c..0000000 --- a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/merge.go +++ /dev/null @@ -1,383 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" -) - -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc, mergeMerge) - - return cur -} - -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { - for k, v := range *patch { - if v == nil { - if mergeMerge { - (*doc)[k] = nil - } else { - delete(*doc, k) - } - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - pruneNulls(v) - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v, mergeMerge) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - newAry = append(newAry, v) - } - } - - *ary = newAry - - return ary -} - -var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") -var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") - -// MergeMergePatches merges two merge patches together, such that -// applying this resulting merged merge patch to a document yields the same -// as merging each merge patch to the document in succession. 
-func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { - return doMergePatch(patch1Data, patch2Data, true) -} - -// MergePatch merges the patchData into the docData. -func MergePatch(docData, patchData []byte) ([]byte, error) { - return doMergePatch(docData, patchData, false) -} - -func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, errBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, errBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, errBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, errBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - if mergeMerge { - doc = patch - } else { - doc = pruneDocNulls(patch) - } - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, errBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch, mergeMerge) - } - - return json.Marshal(doc) -} - -// resemblesJSONArray indicates whether the byte-slice "appears" to be -// a JSON array or not. -// False-positives are possible, as this function does not check the internal -// structure of the array. It only checks that the outer syntax is present and -// correct. -func resemblesJSONArray(input []byte) bool { - input = bytes.TrimSpace(input) - - hasPrefix := bytes.HasPrefix(input, []byte("[")) - hasSuffix := bytes.HasSuffix(input, []byte("]")) - - return hasPrefix && hasSuffix -} - -// CreateMergePatch will return a merge patch document capable of converting -// the original document(s) to the modified document(s). -// The parameters can be bytes of either two JSON Documents, or two arrays of -// JSON documents. -// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalResemblesArray := resemblesJSONArray(originalJSON) - modifiedResemblesArray := resemblesJSONArray(modifiedJSON) - - // Do both byte-slices seem like JSON arrays? - if originalResemblesArray && modifiedResemblesArray { - return createArrayMergePatch(originalJSON, modifiedJSON) - } - - // Are both byte-slices are not arrays? Then they are likely JSON objects... - if !originalResemblesArray && !modifiedResemblesArray { - return createObjectMergePatch(originalJSON, modifiedJSON) - } - - // None of the above? Then return an error because of mismatched types. - return nil, errBadMergeTypes -} - -// createObjectMergePatch will return a merge-patch document capable of -// converting the original document to the modified document. 
-func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDoc := map[string]interface{}{} - modifiedDoc := map[string]interface{}{} - - err := json.Unmarshal(originalJSON, &originalDoc) - if err != nil { - return nil, errBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDoc) - if err != nil { - return nil, errBadJSONDoc - } - - dest, err := getDiff(originalDoc, modifiedDoc) - if err != nil { - return nil, err - } - - return json.Marshal(dest) -} - -// createArrayMergePatch will return an array of merge-patch documents capable -// of converting the original document to the modified document for each -// pair of JSON documents provided in the arrays. -// Arrays of mismatched sizes will result in an error. -func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDocs := []json.RawMessage{} - modifiedDocs := []json.RawMessage{} - - err := json.Unmarshal(originalJSON, &originalDocs) - if err != nil { - return nil, errBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDocs) - if err != nil { - return nil, errBadJSONDoc - } - - total := len(originalDocs) - if len(modifiedDocs) != total { - return nil, errBadJSONDoc - } - - result := []json.RawMessage{} - for i := 0; i < len(originalDocs); i++ { - original := originalDocs[i] - modified := modifiedDocs[i] - - patch, err := createObjectMergePatch(original, modified) - if err != nil { - return nil, err - } - - result = append(result, json.RawMessage(patch)) - } - - return json.Marshal(result) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values matches (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case nil: - // Both nil, fine. - return true - case map[string]interface{}: - bt := bv.(map[string]interface{}) - for key := range at { - if !matchesValue(at[key], bt[key]) { - return false - } - } - for key := range bt { - if !matchesValue(at[key], bt[key]) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
-func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - av, ok := a[key] - // value was added - if !ok { - into[key] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[key] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[key] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[key] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[key] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. - default: - into[key] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/patch.go b/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index f26b682..0000000 --- a/cmd/bpa-operator/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,682 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" -) - -const ( - eRaw = iota - eDoc - eAry -) - -var SupportNegativeIndices bool = true - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -type operation map[string]*json.RawMessage - -// Patch is an ordered collection of operations. 
-type Patch []operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, fmt.Errorf("Unknown type") - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - if n.raw == nil { - return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document") - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - if n.raw == nil { - return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array") - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - if n.raw == nil { - return nil - } - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -func (o operation) kind() string { - if obj, ok := o["op"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) path() string { - if obj, ok := o["path"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) from() string { - if obj, ok := o["from"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -func (o operation) value() *lazyNode { - if obj, ok := 
o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *container, path string) (container, string) { - doc := *pd - - split := strings.Split(path, "/") - - if len(split) < 2 { - return nil, "" - } - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return fmt.Errorf("Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -func (d *partialArray) set(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - sz := len(*d) - if idx+1 > sz { - sz = idx + 1 - } - - ary := make([]*lazyNode, sz) - - cur := *d - - copy(ary, cur) - - if idx >= len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - - ary[idx] = val - - *d = ary - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - ary := make([]*lazyNode, len(*d)+1) - - cur := *d - - if idx >= len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - - if SupportNegativeIndices { - if idx < -len(ary) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - - if idx < 0 { - idx += len(ary) - } - } - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx >= len(*d) { - return nil, fmt.Errorf("Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - - if SupportNegativeIndices { - if idx < -len(cur) { - return fmt.Errorf("Unable to access invalid index: %d", idx) - } - - if idx < 0 { - idx += len(cur) - } - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *container, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path) - } - - return con.add(key, op.value()) -} - -func (p Patch) remove(doc *container, op operation) error { - path 
:= op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path) - } - - return con.remove(key) -} - -func (p Patch) replace(doc *container, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) - } - - _, ok := con.get(key) - if ok != nil { - return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path) - } - - return con.set(key, op.value()) -} - -func (p Patch) move(doc *container, op operation) error { - from := op.from() - - con, key := findObject(doc, from) - - if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return err - } - - err = con.remove(key) - if err != nil { - return err - } - - path := op.path() - - con, key = findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) - } - - return con.set(key, val) -} - -func (p Patch) test(doc *container, op operation) error { - path := op.path() - - con, key := findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - - if err != nil { - return err - } - - if val == nil { - if op.value().raw == nil { - return nil - } - return fmt.Errorf("Testing value %s failed", path) - } else if op.value() == nil { - return fmt.Errorf("Testing value %s failed", path) - } - - if val.equal(op.value()) { - return nil - } - - return fmt.Errorf("Testing value %s failed", path) -} - -func (p Patch) copy(doc *container, op operation) error { - from := op.from() - - con, key := findObject(doc, from) - - if con == nil { - return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return err - } - - path := op.path() - - con, key = findObject(doc, path) - - if con == nil { - return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path) - } - - return con.set(key, val) -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. -func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. 
-func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - var pd container - if doc[0] == '[' { - pd = &partialArray{} - } else { - pd = &partialDoc{} - } - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - for _, op := range p { - switch op.kind() { - case "add": - err = p.add(&pd, op) - case "remove": - err = p.remove(&pd, op) - case "replace": - err = p.replace(&pd, op) - case "move": - err = p.move(&pd, op) - case "test": - err = p.test(&pd, op) - case "copy": - err = p.copy(&pd, op) - default: - err = fmt.Errorf("Unexpected kind: %s", op.kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} diff --git a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/.gitignore b/cmd/bpa-operator/vendor/github.com/ghodss/yaml/.gitignore deleted file mode 100644 index e256a31..0000000 --- a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/.gitignore +++ /dev/null @@ -1,20 +0,0 @@ -# OSX leaves these everywhere on SMB shares -._* - -# Eclipse files -.classpath -.project -.settings/** - -# Emacs save files -*~ - -# Vim-related files -[._]*.s[a-w][a-z] -[._]s[a-w][a-z] -*.un~ -Session.vim -.netrwhist - -# Go test binaries -*.test diff --git a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/.travis.yml b/cmd/bpa-operator/vendor/github.com/ghodss/yaml/.travis.yml deleted file mode 100644 index 0e9d6ed..0000000 --- a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 -script: - - go test - - go build diff --git a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/LICENSE b/cmd/bpa-operator/vendor/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36..0000000 --- a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/README.md b/cmd/bpa-operator/vendor/github.com/ghodss/yaml/README.md deleted file mode 100644 index 0200f75..0000000 --- a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YAML marshaling and unmarshaling support for Go - -[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) - -## Introduction - -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. - -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). - -## Compatibility - -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). - -## Caveats - -**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: - -``` -BAD: - exampleKey: !!binary gIGC - -GOOD: - exampleKey: gIGC -... and decode the base64 data in your code. -``` - -**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. 
This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. - -## Installation and usage - -To install, run: - -``` -$ go get github.com/ghodss/yaml -``` - -And import using: - -``` -import "github.com/ghodss/yaml" -``` - -Usage is very similar to the JSON library: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` -} - -func main() { - // Marshal a Person struct to YAML. - p := Person{"John", 30} - y, err := yaml.Marshal(p) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - age: 30 - name: John - */ - - // Unmarshal the YAML back into a Person struct. - var p2 Person - err = yaml.Unmarshal(y, &p2) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(p2) - /* Output: - {John 30} - */ -} -``` - -`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: - -```go -package main - -import ( - "fmt" - - "github.com/ghodss/yaml" -) - -func main() { - j := []byte(`{"name": "John", "age": 30}`) - y, err := yaml.JSONToYAML(j) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(y)) - /* Output: - name: John - age: 30 - */ - j2, err := yaml.YAMLToJSON(y) - if err != nil { - fmt.Printf("err: %v\n", err) - return - } - fmt.Println(string(j2)) - /* Output: - {"age":30,"name":"John"} - */ -} -``` diff --git a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/fields.go b/cmd/bpa-operator/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 5860074..0000000 --- a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. 
-type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. 
- nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. 
- kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. -// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'Å¿' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. 
-func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/yaml.go b/cmd/bpa-operator/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index 4fb4054..0000000 --- a/cmd/bpa-operator/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). 
So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. - return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. 
- jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. - jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/logr/LICENSE b/cmd/bpa-operator/vendor/github.com/go-logr/logr/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/logr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/logr/README.md b/cmd/bpa-operator/vendor/github.com/go-logr/logr/README.md deleted file mode 100644 index 26296d0..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/logr/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# A more minimal logging API for Go - -Before you consider this package, please read [this blog post by the inimitable -Dave Cheney](http://dave.cheney.net/2015/11/05/lets-talk-about-logging). I -really appreciate what he has to say, and it largely aligns with my own -experiences. Too many choices of levels means inconsistent logs. - -This package offers a purely abstract interface, based on these ideas but with -a few twists. Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. - -# Differences from Dave's ideas - -The main differences are: - -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. - -Info logs are things you want to tell the user which are not errors. Error -logs are, well, errors. If your code receives an `error` from a subordinate -function call and is logging that `error` *and not returning it*, use error -logs. - -2) Verbosity-levels on info logs. This gives developers a chance to indicate -arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this -may feel very similar, but the primary difference is the lack of semantics. -Because verbosity is a numerical value, it's safe to assume that an app running -with higher verbosity means more (and less important) logs will be generated. - -This is a BETA grade API. I have implemented it for -[glog](https://godoc.org/github.com/golang/glog). Until there is a significant -2nd implementation, I don't really know how it will change. diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/logr/logr.go b/cmd/bpa-operator/vendor/github.com/go-logr/logr/logr.go deleted file mode 100644 index ad72e78..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/logr/logr.go +++ /dev/null @@ -1,151 +0,0 @@ -// Package logr defines abstract interfaces for logging. Packages can depend on -// these interfaces and callers can implement logging in whatever way is -// appropriate. -// -// This design derives from Dave Cheney's blog: -// http://dave.cheney.net/2015/11/05/lets-talk-about-logging -// -// This is a BETA grade API. Until there is a significant 2nd implementation, -// I don't really know how it will change. -// -// The logging specifically makes it non-trivial to use format strings, to encourage -// attaching structured information instead of unstructured format strings. -// -// Usage -// -// Logging is done using a Logger. Loggers can have name prefixes and named values -// attached, so that all log messages logged with that Logger have some base context -// associated. -// -// The term "key" is used to refer to the name associated with a particular value, to -// disambiguate it from the general Logger name. -// -// For instance, suppose we're trying to reconcile the state of an object, and we want -// to log that we've made some decision. 
-// -// With the traditional log package, we might write -// log.Printf( -// "decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) -// -// With logr's structured logging, we'd write -// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers", -// // and the named value target-type=Foo, for extra context. -// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") -// -// // later on... -// log.Info("setting field foo on object", "value", targetValue, "object", object) -// -// Depending on our logging implementation, we could then make logging decisions based on field values -// (like only logging such events for objects in a certain namespace), or copy the structured -// information into a structured log store. -// -// For logging errors, Logger has a method called Error. Suppose we wanted to log an -// error while reconciling. With the traditional log package, we might write -// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) -// -// With logr, we'd instead write -// // assuming the above setup for log -// log.Error(err, "unable to reconcile object", "object", object) -// -// This functions similarly to: -// log.Info("unable to reconcile object", "error", err, "object", object) -// -// However, it ensures that a standard key for the error value ("error") is used across all -// error logging. Furthermore, certain implementations may choose to attach additional -// information (such as stack traces) on calls to Error, so it's preferred to use Error -// to log errors. -// -// Parts of a log line -// -// Each log message from a Logger has four types of context: -// logger name, log verbosity, log message, and the named values. -// -// The Logger name constists of a series of name "segments" added by successive calls to WithName. -// These name segments will be joined in some way by the underlying implementation. It is strongly -// reccomended that name segements contain simple identifiers (letters, digits, and hyphen), and do -// not contain characters that could muddle the log output or confuse the joining operation (e.g. -// whitespace, commas, periods, slashes, brackets, quotes, etc). -// -// Log verbosity represents how little a log matters. Level zero, the default, matters most. -// Increasing levels matter less and less. Try to avoid lots of different verbosity levels, -// and instead provide useful keys, logger names, and log messages for users to filter on. -// It's illegal to pass a log level below zero. -// -// The log message consists of a constant message attached to the the log line. This -// should generally be a simple description of what's occuring, and should never be a format string. -// -// Variable information can then be attached using named values (key/value pairs). Keys are arbitrary -// strings, while values may be any Go value. -// -// Key Naming Conventions -// -// While users are generally free to use key names of their choice, it's generally best to avoid -// using the following keys, as they're frequently used by implementations: -// -// - `"error"`: the underlying error value in the `Error` method. -// - `"stacktrace"`: the stack trace associated with a particular log line or error -// (often from the `Error` message). -// - `"caller"`: the calling information (file/line) of a particular log line. -// - `"msg"`: the log message. -// - `"level"`: the log level. -// - `"ts"`: the timestamp for a log line. 
-// -// Implementations are encouraged to make use of these keys to represent the above -// concepts, when neccessary (for example, in a pure-JSON output form, it would be -// necessary to represent at least message and timestamp as ordinary named values). -package logr - -// TODO: consider adding back in format strings if they're really needed -// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) -// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats - -// InfoLogger represents the ability to log non-error messages, at a particular verbosity. -type InfoLogger interface { - // Info logs a non-error message with the given key/value pairs as context. - // - // The msg argument should be used to add some constant description to - // the log line. The key/value pairs can then be used to add additional - // variable information. The key/value pairs should alternate string - // keys and arbitrary values. - Info(msg string, keysAndValues ...interface{}) - - // Enabled tests whether this InfoLogger is enabled. For example, - // commandline flags might be used to set the logging verbosity and disable - // some info logs. - Enabled() bool -} - -// Logger represents the ability to log messages, both errors and not. -type Logger interface { - // All Loggers implement InfoLogger. Calling InfoLogger methods directly on - // a Logger value is equivalent to calling them on a V(0) InfoLogger. For - // example, logger.Info() produces the same result as logger.V(0).Info. - InfoLogger - - // Error logs an error, with the given message and key/value pairs as context. - // It functions similarly to calling Info with the "error" named value, but may - // have unique behavior, and should be preferred for logging errors (see the - // package documentations for more information). - // - // The msg field should be used to add context to any underlying error, - // while the err field should be used to attach the actual error that - // triggered this log line, if present. - Error(err error, msg string, keysAndValues ...interface{}) - - // V returns an InfoLogger value for a specific verbosity level. A higher - // verbosity level means a log message is less important. It's illegal to - // pass a log level less than zero. - V(level int) InfoLogger - - // WithValues adds some key-value pairs of context to a logger. - // See Info for documentation on how key/value pairs work. - WithValues(keysAndValues ...interface{}) Logger - - // WithName adds a new element to the logger's name. - // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly reccomended - // that name segments contain only letters, digits, and hyphens - // (see the package documentation for more information). - WithName(name string) Logger -} diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/.gitignore b/cmd/bpa-operator/vendor/github.com/go-logr/zapr/.gitignore deleted file mode 100644 index 5ba7772..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*~ -*.swp -/vendor diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/Gopkg.lock b/cmd/bpa-operator/vendor/github.com/go-logr/zapr/Gopkg.lock deleted file mode 100644 index 8da0a8f..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/Gopkg.lock +++ /dev/null @@ -1,52 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - digest = "1:edd2fa4578eb086265db78a9201d15e76b298dfd0d5c379da83e9c61712cf6df" - name = "github.com/go-logr/logr" - packages = ["."] - pruneopts = "UT" - revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" - version = "v0.1.0" - -[[projects]] - digest = "1:3c1a69cdae3501bf75e76d0d86dc6f2b0a7421bc205c0cb7b96b19eed464a34d" - name = "go.uber.org/atomic" - packages = ["."] - pruneopts = "UT" - revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" - version = "v1.3.2" - -[[projects]] - digest = "1:60bf2a5e347af463c42ed31a493d817f8a72f102543060ed992754e689805d1a" - name = "go.uber.org/multierr" - packages = ["."] - pruneopts = "UT" - revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" - version = "v1.1.0" - -[[projects]] - digest = "1:9580b1b079114140ade8cec957685344d14f00119e0241f6b369633cb346eeb3" - name = "go.uber.org/zap" - packages = [ - ".", - "buffer", - "internal/bufferpool", - "internal/color", - "internal/exit", - "zapcore", - ] - pruneopts = "UT" - revision = "eeedf312bc6c57391d84767a4cd413f02a917974" - version = "v1.8.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/go-logr/logr", - "go.uber.org/zap", - "go.uber.org/zap/zapcore", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/Gopkg.toml b/cmd/bpa-operator/vendor/github.com/go-logr/zapr/Gopkg.toml deleted file mode 100644 index ae475d7..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/Gopkg.toml +++ /dev/null @@ -1,38 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/go-logr/logr" - version = "0.1.0" - -[[constraint]] - name = "go.uber.org/zap" - version = "1.8.0" - -[prune] - go-tests = true - unused-packages = true diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/LICENSE b/cmd/bpa-operator/vendor/github.com/go-logr/zapr/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/README.md b/cmd/bpa-operator/vendor/github.com/go-logr/zapr/README.md deleted file mode 100644 index 8472875..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/README.md +++ /dev/null @@ -1,45 +0,0 @@ -Zapr :zap: -========== - -A [logr](https://github.com/go-logr/logr) implementation using -[Zap](go.uber.org/zap). 
- -Usage ------ - -```go -import ( - "fmt" - - "go.uber.org/zap" - "github.com/go-logr/logr" - "github.com/directxman12/zapr" -) - -func main() { - var log logr.Logger - - zapLog, err := zap.NewDevelopment() - if err != nil { - panic(fmt.Sprintf("who watches the watchmen (%v)?", err)) - } - log = zapr.NewLogger(zapLog) - - log.Info("Logr in action!", "the answer", 42) -} -``` - -Implementation Details ----------------------- - -For the most part, concepts in Zap correspond directly with those in logr. - -Unlike Zap, all fields *must* be in the form of suggared fields -- -it's illegal to pass a strongly-typed Zap field in a key position to any -of the logging methods (`Log`, `Error`). - -Levels in logr correspond to custom debug levels in Zap. Any given level -in logr is represents by its inverse in Zap (`zapLevel = -1*logrLevel`). - -For example `V(2)` is equivalent to log level -2 in Zap, while `V(1)` is -equivalent to Zap's `DebugLevel`. diff --git a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/zapr.go b/cmd/bpa-operator/vendor/github.com/go-logr/zapr/zapr.go deleted file mode 100644 index a9a10ae..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-logr/zapr/zapr.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2018 Solly Ross -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// package zapr defines an implementation of the github.com/go-logr/logr -// interfaces built on top of Zap (go.uber.org/zap). -// -// Usage -// -// A new logr.Logger can be constructed from an existing zap.Logger using -// the NewLogger function: -// -// log := zapr.NewLogger(someZapLogger) -// -// Implementation Details -// -// For the most part, concepts in Zap correspond directly with those in -// logr. -// -// Unlike Zap, all fields *must* be in the form of suggared fields -- -// it's illegal to pass a strongly-typed Zap field in a key position -// to any of the log methods. -// -// Levels in logr correspond to custom debug levels in Zap. Any given level -// in logr is represents by its inverse in zap (`zapLevel = -1*logrLevel`). -// For example V(2) is equivalent to log level -2 in Zap, while V(1) is -// equivalent to Zap's DebugLevel. -package zapr - -import ( - "github.com/go-logr/logr" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// noopInfoLogger is a logr.InfoLogger that's always disabled, and does nothing. -type noopInfoLogger struct{} - -func (l *noopInfoLogger) Enabled() bool { return false } -func (l *noopInfoLogger) Info(_ string, _ ...interface{}) {} - -var disabledInfoLogger = &noopInfoLogger{} - -// NB: right now, we always use the equivalent of sugared logging. -// This is necessary, since logr doesn't define non-suggared types, -// and using zap-specific non-suggared types would make uses tied -// directly to Zap. - -// infoLogger is a logr.InfoLogger that uses Zap to log at a particular -// level. The level has already been converted to a Zap level, which -// is to say that `logrLevel = -1*zapLevel`. 
-type infoLogger struct { - lvl zapcore.Level - l *zap.Logger -} - -func (l *infoLogger) Enabled() bool { return true } -func (l *infoLogger) Info(msg string, keysAndVals ...interface{}) { - if checkedEntry := l.l.Check(l.lvl, msg); checkedEntry != nil { - checkedEntry.Write(handleFields(l.l, keysAndVals)...) - } -} - -// zapLogger is a logr.Logger that uses Zap to log. -type zapLogger struct { - // NB: this looks very similar to zap.SugaredLogger, but - // deals with our desire to have multiple verbosity levels. - l *zap.Logger - infoLogger -} - -// handleFields converts a bunch of arbitrary key-value pairs into Zap fields. It takes -// additional pre-converted Zap fields, for use with automatically attached fields, like -// `error`. -func handleFields(l *zap.Logger, args []interface{}, additional ...zap.Field) []zap.Field { - // a slightly modified version of zap.SugaredLogger.sweetenFields - if len(args) == 0 { - // fast-return if we have no suggared fields. - return additional - } - - // unlike Zap, we can be pretty sure users aren't passing structured - // fields (since logr has no concept of that), so guess that we need a - // little less space. - fields := make([]zap.Field, 0, len(args)/2+len(additional)) - for i := 0; i < len(args); { - // check just in case for strongly-typed Zap fields, which is illegal (since - // it breaks implementation agnosticism), so we can give a better error message. - if _, ok := args[i].(zap.Field); ok { - l.DPanic("strongly-typed Zap Field passed to logr", zap.Any("zap field", args[i])) - break - } - - // make sure this isn't a mismatched key - if i == len(args)-1 { - l.DPanic("odd number of arguments passed as key-value pairs for logging", zap.Any("ignored key", args[i])) - break - } - - // process a key-value pair, - // ensuring that the key is a string - key, val := args[i], args[i+1] - keyStr, isString := key.(string) - if !isString { - // if the key isn't a string, DPanic and stop logging - l.DPanic("non-string key argument passed to logging, ignoring all later arguments", zap.Any("invalid key", key)) - break - } - - fields = append(fields, zap.Any(keyStr, val)) - i += 2 - } - - return append(fields, additional...) -} - -func (l *zapLogger) Error(err error, msg string, keysAndVals ...interface{}) { - if checkedEntry := l.l.Check(zap.ErrorLevel, msg); checkedEntry != nil { - checkedEntry.Write(handleFields(l.l, keysAndVals, zap.Error(err))...) - } -} - -func (l *zapLogger) V(level int) logr.InfoLogger { - lvl := zapcore.Level(-1 * level) - if l.l.Core().Enabled(lvl) { - return &infoLogger{ - lvl: lvl, - l: l.l, - } - } - return disabledInfoLogger -} - -func (l *zapLogger) WithValues(keysAndValues ...interface{}) logr.Logger { - newLogger := l.l.With(handleFields(l.l, keysAndValues)...) - return NewLogger(newLogger) -} - -func (l *zapLogger) WithName(name string) logr.Logger { - newLogger := l.l.Named(name) - return NewLogger(newLogger) -} - -// NewLogger creates a new logr.Logger using the given Zap Logger to log. 
-func NewLogger(l *zap.Logger) logr.Logger { - return &zapLogger{ - l: l, - infoLogger: infoLogger{ - l: l, - lvl: zap.InfoLevel, - }, - } -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.editorconfig deleted file mode 100644 index 3152da6..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.gitignore b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.gitignore deleted file mode 100644 index 769c244..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 3436c45..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- '1.9' -- 1.10.x -- 1.11.x -install: -- go get -u github.com/stretchr/testify/assert -- go get -u github.com/go-openapi/swag -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b06..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/LICENSE b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/LICENSE deleted file mode 100644 index d645695..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/README.md b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/README.md deleted file mode 100644 index 813788a..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer) -An implementation of JSON Pointer - Go language - -## Status -Completed YES - -Tested YES - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -### Note -The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/go.mod b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/go.mod deleted file mode 100644 index eb4d623..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module github.com/go-openapi/jsonpointer - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/swag v0.17.0 - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - gopkg.in/yaml.v2 v2.2.1 // indirect -) diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/go.sum b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/go.sum deleted file mode 100644 index c71f4d7..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/pointer.go b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/pointer.go deleted file mode 100644 index fe2d6ee..0000000 --- 
a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonpointer -// repository-desc An implementation of JSON Pointer - Go language -// -// description Main and unique file. -// -// created 25-02-2013 - -package jsonpointer - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/go-openapi/swag" -) - -const ( - emptyPointer = `` - pointerSeparator = `/` - - invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator -) - -var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem() -var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem() - -// JSONPointable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONPointable interface { - JSONLookup(string) (interface{}, error) -} - -// JSONSetable is an interface for structs to implement when they need to customize the -// json pointer process -type JSONSetable interface { - JSONSet(string, interface{}) error -} - -// New creates a new json pointer for the given string -func New(jsonPointerString string) (Pointer, error) { - - var p Pointer - err := p.parse(jsonPointerString) - return p, err - -} - -// Pointer the json pointer reprsentation -type Pointer struct { - referenceTokens []string -} - -// "Constructor", parses the given string JSON pointer -func (p *Pointer) parse(jsonPointerString string) error { - - var err error - - if jsonPointerString != emptyPointer { - if !strings.HasPrefix(jsonPointerString, pointerSeparator) { - err = errors.New(invalidStart) - } else { - referenceTokens := strings.Split(jsonPointerString, pointerSeparator) - for _, referenceToken := range referenceTokens[1:] { - p.referenceTokens = append(p.referenceTokens, referenceToken) - } - } - } - - return err -} - -// Get uses the pointer to retrieve a value from a JSON document -func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) { - return p.get(document, swag.DefaultJSONNameProvider) -} - -// Set uses the pointer to set a value from a JSON document -func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) { - return document, p.set(document, value, swag.DefaultJSONNameProvider) -} - -// GetForToken gets a value for a json pointer token 1 level deep -func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) { - return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider) -} - -// SetForToken gets a value for a json pointer token 1 level deep -func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) { - return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider) -} - -func 
getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - - switch kind { - - case reflect.Struct: - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return nil, kind, err - } - return r, kind, nil - } - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return nil, kind, fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - return fld.Interface(), kind, nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if mv.IsValid() && !swag.IsZero(mv) { - return mv.Interface(), kind, nil - } - return nil, kind, fmt.Errorf("object has no key %q", decodedToken) - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return nil, kind, err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - return elem.Interface(), kind, nil - - default: - return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error { - rValue := reflect.Indirect(reflect.ValueOf(node)) - switch rValue.Kind() { - - case reflect.Struct: - if ns, ok := node.(JSONSetable); ok { // pointer impl - return ns.JSONSet(decodedToken, data) - } - - if rValue.Type().Implements(jsonSetableType) { - return node.(JSONSetable).JSONSet(decodedToken, data) - } - - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.IsValid() { - fld.Set(reflect.ValueOf(data)) - } - return nil - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - rValue.SetMapIndex(kv, reflect.ValueOf(data)) - return nil - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - if !elem.CanSet() { - return fmt.Errorf("can't set slice index %s to %v", decodedToken, data) - } - elem.Set(reflect.ValueOf(data)) - return nil - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) - } - -} - -func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) { - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - kind := reflect.Invalid - - // Full document when empty - if len(p.referenceTokens) == 0 { - return node, kind, nil - } - - for _, token := range p.referenceTokens { - - decodedToken := Unescape(token) - - r, knd, err := getSingleImpl(node, decodedToken, nameProvider) - if err != nil { - return nil, knd, err - } - node, kind = r, knd - - } - - rValue := reflect.ValueOf(node) - kind = rValue.Kind() - - return node, kind, nil -} - -func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error { - knd := reflect.ValueOf(node).Kind() - - if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != 
reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") - } - - if nameProvider == nil { - nameProvider = swag.DefaultJSONNameProvider - } - - // Full document when empty - if len(p.referenceTokens) == 0 { - return nil - } - - lastI := len(p.referenceTokens) - 1 - for i, token := range p.referenceTokens { - isLastToken := i == lastI - decodedToken := Unescape(token) - - if isLastToken { - - return setSingleImpl(node, data, decodedToken, nameProvider) - } - - rValue := reflect.Indirect(reflect.ValueOf(node)) - kind := rValue.Kind() - - switch kind { - - case reflect.Struct: - if rValue.Type().Implements(jsonPointableType) { - r, err := node.(JSONPointable).JSONLookup(decodedToken) - if err != nil { - return err - } - fld := reflect.ValueOf(r) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = r - continue - } - nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken) - if !ok { - return fmt.Errorf("object has no field %q", decodedToken) - } - fld := rValue.FieldByName(nm) - if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr { - node = fld.Addr().Interface() - continue - } - node = fld.Interface() - - case reflect.Map: - kv := reflect.ValueOf(decodedToken) - mv := rValue.MapIndex(kv) - - if !mv.IsValid() { - return fmt.Errorf("object has no key %q", decodedToken) - } - if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr { - node = mv.Addr().Interface() - continue - } - node = mv.Interface() - - case reflect.Slice: - tokenIndex, err := strconv.Atoi(decodedToken) - if err != nil { - return err - } - sLength := rValue.Len() - if tokenIndex < 0 || tokenIndex >= sLength { - return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex) - } - - elem := rValue.Index(tokenIndex) - if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr { - node = elem.Addr().Interface() - continue - } - node = elem.Interface() - - default: - return fmt.Errorf("invalid token reference %q", decodedToken) - } - - } - - return nil -} - -// DecodedTokens returns the decoded tokens -func (p *Pointer) DecodedTokens() []string { - result := make([]string, 0, len(p.referenceTokens)) - for _, t := range p.referenceTokens { - result = append(result, Unescape(t)) - } - return result -} - -// IsEmpty returns true if this is an empty json pointer -// this indicates that it points to the root document -func (p *Pointer) IsEmpty() bool { - return len(p.referenceTokens) == 0 -} - -// Pointer to string representation function -func (p *Pointer) String() string { - - if len(p.referenceTokens) == 0 { - return emptyPointer - } - - pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator) - - return pointerString -} - -// Specific JSON pointer encoding here -// ~0 => ~ -// ~1 => / -// ... 
and vice versa - -const ( - encRefTok0 = `~0` - encRefTok1 = `~1` - decRefTok0 = `~` - decRefTok1 = `/` -) - -// Unescape unescapes a json pointer reference token string to the original representation -func Unescape(token string) string { - step1 := strings.Replace(token, encRefTok1, decRefTok1, -1) - step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1) - return step2 -} - -// Escape escapes a pointer reference token string -func Escape(token string) string { - step1 := strings.Replace(token, decRefTok0, encRefTok0, -1) - step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1) - return step2 -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/.gitignore b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/.gitignore deleted file mode 100644 index 769c244..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/.gitignore +++ /dev/null @@ -1 +0,0 @@ -secrets.yml diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/.travis.yml b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 40034d2..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- '1.9' -- 1.10.x -- 1.11.x -install: -- go get -u github.com/stretchr/testify/assert -- go get -u github.com/PuerkitoBio/purell -- go get -u github.com/go-openapi/jsonpointer -language: go -notifications: - slack: - secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b06..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/LICENSE b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/LICENSE deleted file mode 100644 index d645695..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/README.md b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/README.md deleted file mode 100644 index 66345f4..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference) -An implementation of JSON Reference - Go language - -## Status -Work in progress ( 90% done ) - -## Dependencies -https://github.com/go-openapi/jsonpointer - -## References -http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 - -http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/go.mod b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/go.mod deleted file mode 100644 index 6d15a70..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module github.com/go-openapi/jsonreference - -require ( - github.com/PuerkitoBio/purell v1.1.0 - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/jsonpointer v0.17.0 - github.com/go-openapi/swag v0.17.0 // indirect - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/yaml.v2 v2.2.1 // indirect -) diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/go.sum b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/go.sum deleted file mode 100644 index ec9bdbc..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/go.sum +++ /dev/null @@ -1,20 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 
h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/reference.go b/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/reference.go deleted file mode 100644 index 3bc0a6e..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/jsonreference/reference.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2013 sigu-399 ( https://github.com/sigu-399 ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author sigu-399 -// author-github https://github.com/sigu-399 -// author-mail sigu.399@gmail.com -// -// repository-name jsonreference -// repository-desc An implementation of JSON Reference - Go language -// -// description Main and unique file. -// -// created 26-02-2013 - -package jsonreference - -import ( - "errors" - "net/url" - "strings" - - "github.com/PuerkitoBio/purell" - "github.com/go-openapi/jsonpointer" -) - -const ( - fragmentRune = `#` -) - -// New creates a new reference for the given string -func New(jsonReferenceString string) (Ref, error) { - - var r Ref - err := r.parse(jsonReferenceString) - return r, err - -} - -// MustCreateRef parses the ref string and panics when it's invalid. 
-// Use the New method for a version that returns an error -func MustCreateRef(ref string) Ref { - r, err := New(ref) - if err != nil { - panic(err) - } - return r -} - -// Ref represents a json reference object -type Ref struct { - referenceURL *url.URL - referencePointer jsonpointer.Pointer - - HasFullURL bool - HasURLPathOnly bool - HasFragmentOnly bool - HasFileScheme bool - HasFullFilePath bool -} - -// GetURL gets the URL for this reference -func (r *Ref) GetURL() *url.URL { - return r.referenceURL -} - -// GetPointer gets the json pointer for this reference -func (r *Ref) GetPointer() *jsonpointer.Pointer { - return &r.referencePointer -} - -// String returns the best version of the url for this reference -func (r *Ref) String() string { - - if r.referenceURL != nil { - return r.referenceURL.String() - } - - if r.HasFragmentOnly { - return fragmentRune + r.referencePointer.String() - } - - return r.referencePointer.String() -} - -// IsRoot returns true if this reference is a root document -func (r *Ref) IsRoot() bool { - return r.referenceURL != nil && - !r.IsCanonical() && - !r.HasURLPathOnly && - r.referenceURL.Fragment == "" -} - -// IsCanonical returns true when this pointer starts with http(s):// or file:// -func (r *Ref) IsCanonical() bool { - return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL) -} - -// "Constructor", parses the given string JSON reference -func (r *Ref) parse(jsonReferenceString string) error { - - parsed, err := url.Parse(jsonReferenceString) - if err != nil { - return err - } - - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) - refURL := r.referenceURL - - if refURL.Scheme != "" && refURL.Host != "" { - r.HasFullURL = true - } else { - if refURL.Path != "" { - r.HasURLPathOnly = true - } else if refURL.RawQuery == "" && refURL.Fragment != "" { - r.HasFragmentOnly = true - } - } - - r.HasFileScheme = refURL.Scheme == "file" - r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/") - - // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error - r.referencePointer, _ = jsonpointer.New(refURL.Fragment) - - return nil -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - childURL := child.GetURL() - parentURL := r.GetURL() - if childURL == nil { - return nil, errors.New("child url is nil") - } - if parentURL == nil { - return &child, nil - } - - ref, err := New(parentURL.ResolveReference(childURL).String()) - if err != nil { - return nil, err - } - return &ref, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.editorconfig b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.editorconfig deleted file mode 100644 index 3152da6..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.gitignore b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.gitignore deleted file mode 100644 index dd91ed6..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.golangci.yml b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.golangci.yml deleted file mode 100644 index 0027708..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.golangci.yml +++ /dev/null @@ -1,23 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 2 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.travis.yml b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.travis.yml deleted file mode 100644 index a4f0348..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- '1.9' -- 1.10.x -- 1.11.x -install: -- go get -u github.com/stretchr/testify -- go get -u github.com/go-openapi/swag -- go get -u gopkg.in/yaml.v2 -- go get -u github.com/go-openapi/jsonpointer -- go get -u github.com/go-openapi/jsonreference -language: go -notifications: - slack: - secure: 
QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b06..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/LICENSE b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/LICENSE deleted file mode 100644 index d645695..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/README.md b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/README.md deleted file mode 100644 index 6354742..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) - -The object model for OpenAPI specification documents. - -Currently supports Swagger 2.0. diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/bindata.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/bindata.go deleted file mode 100644 index d5ec7b9..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/bindata.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-bindata. DO NOT EDIT. -// sources: -// schemas/jsonschema-draft-04.json (4.357kB) -// schemas/v2/schema.json (40.249kB) - -package spec - -import ( - "bytes" - "compress/gzip" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("Read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo - digest [sha256.Size]byte -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") - -func jsonschemaDraft04JSONBytes() ([]byte, error) { - return bindataRead( - _jsonschemaDraft04JSON, - "jsonschema-draft-04.json", - ) -} - -func jsonschemaDraft04JSON() (*asset, error) { - bytes, err := jsonschemaDraft04JSONBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 
0xf1, 0x1b, 0x82, 0xe2}} - return a, nil -} - -var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b
\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf
4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\x
a0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") - -func 
v2SchemaJSONBytes() ([]byte, error) { - return bindataRead( - _v2SchemaJSON, - "v2/schema.json", - ) -} - -func v2SchemaJSON() (*asset, error) { - bytes, err := v2SchemaJSONBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(436), modTime: time.Unix(1540282154, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// AssetString returns the asset contents as a string (instead of a []byte). -func AssetString(name string) (string, error) { - data, err := Asset(name) - return string(data), err -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// MustAssetString is like AssetString but panics when Asset would return an -// error. It simplifies safe initialization of global variables. -func MustAssetString(name string) string { - return string(MustAsset(name)) -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func AssetInfo(name string) (os.FileInfo, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetDigest returns the digest of the file with the given name. It returns an -// error if the asset could not be found or the digest could not be loaded. -func AssetDigest(name string) ([sha256.Size]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) - } - return a.digest, nil - } - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) -} - -// Digests returns a map of all known files and their checksums. -func Digests() (map[string][sha256.Size]byte, error) { - mp := make(map[string][sha256.Size]byte, len(_bindata)) - for name := range _bindata { - a, err := _bindata[name]() - if err != nil { - return nil, err - } - mp[name] = a.digest - } - return mp, nil -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. 
-var _bindata = map[string]func() (*asset, error){ - "jsonschema-draft-04.json": jsonschemaDraft04JSON, - - "v2/schema.json": v2SchemaJSON, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"}, -// AssetDir("data/img") would return []string{"a.png", "b.png"}, -// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - canonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(canonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}}, - "v2": &bintree{nil, map[string]*bintree{ - "schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory. -func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) -} - -// RestoreAssets restores an asset under the given directory recursively. -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - canonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/cache.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/cache.go deleted file mode 100644 index 3fada0d..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/cache.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import "sync" - -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.RWMutex - store map[string]interface{} -} - -// Get retrieves a cached URI -func (s *simpleCache) Get(uri string) (interface{}, bool) { - debugLog("getting %q from resolution cache", uri) - s.lock.RLock() - v, ok := s.store[uri] - debugLog("got %q from resolution cache: %t", uri, ok) - - s.lock.RUnlock() - return v, ok -} - -// Set caches a URI -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - -var resCache ResolutionCache - -func init() { - resCache = initResolutionCache() -} - -// initResolutionCache initializes the URI resolution cache -func initResolutionCache() ResolutionCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/contact_info.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/contact_info.go deleted file mode 100644 index f285970..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/contact_info.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ContactInfo contact information for the exposed API. -// -// For more information: http://goo.gl/8us55a#contactObject -type ContactInfo struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/debug.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/debug.go deleted file mode 100644 index 389c528..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/debug.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "fmt" - "log" - "os" - "path/filepath" - "runtime" -) - -var ( - // Debug is true when the SWAGGER_DEBUG env var is not empty. - // It enables a more verbose logging of this package. 
- Debug = os.Getenv("SWAGGER_DEBUG") != "" - // specLogger is a debug logger for this package - specLogger *log.Logger -) - -func init() { - debugOptions() -} - -func debugOptions() { - specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) -} - -func debugLog(msg string, args ...interface{}) { - // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() - if Debug { - _, file1, pos1, _ := runtime.Caller(1) - specLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/expander.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/expander.go deleted file mode 100644 index 1e7fc8c..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/expander.go +++ /dev/null @@ -1,650 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ExpandOptions provides options for spec expand -type ExpandOptions struct { - RelativeBase string - SkipSchemas bool - ContinueOnError bool - AbsoluteCircularRef bool -} - -// ResolveRefWithBase resolves a reference against a context root with preservation of base path -func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - specBasePath := "" - if opts != nil && opts.RelativeBase != "" { - specBasePath, _ = absPath(opts.RelativeBase) - } - - result := new(Schema) - if err := resolver.Resolve(ref, result, specBasePath); err != nil { - return nil, err - } - return result, nil -} - -// ResolveRef resolves a reference against a context root -// ref is guaranteed to be in root (no need to go to external files) -// ResolveRef is ONLY called from the code generation module -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - res, _, err := ref.GetPointer().Get(root) - if err != nil { - panic(err) - } - switch sch := res.(type) { - case Schema: - return &sch, nil - case *Schema: - return sch, nil - case map[string]interface{}: - b, _ := json.Marshal(sch) - newSch := new(Schema) - _ = json.Unmarshal(b, newSch) - return newSch, nil - default: - return nil, fmt.Errorf("unknown type for the resolved reference") - } -} - -// ResolveParameter resolves a parameter reference against a context root -func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) { - return ResolveParameterWithBase(root, ref, nil) -} - -// ResolveParameterWithBase resolves a parameter reference against a context root and base path -func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - - result := new(Parameter) - if err := resolver.Resolve(&ref, result, ""); err != nil { - return nil, err - } - return result, nil -} - -// ResolveResponse resolves 
response a reference against a context root -func ResolveResponse(root interface{}, ref Ref) (*Response, error) { - return ResolveResponseWithBase(root, ref, nil) -} - -// ResolveResponseWithBase resolves response a reference against a context root and base path -func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - - result := new(Response) - if err := resolver.Resolve(&ref, result, ""); err != nil { - return nil, err - } - return result, nil -} - -// ResolveItems resolves parameter items reference against a context root and base path. -// -// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. -// Similarly, $ref are forbidden in response headers. -func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - basePath := "" - if opts.RelativeBase != "" { - basePath = opts.RelativeBase - } - result := new(Items) - if err := resolver.Resolve(&ref, result, basePath); err != nil { - return nil, err - } - return result, nil -} - -// ResolvePathItem resolves response a path item against a context root and base path -func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) { - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return nil, err - } - basePath := "" - if opts.RelativeBase != "" { - basePath = opts.RelativeBase - } - result := new(PathItem) - if err := resolver.Resolve(&ref, result, basePath); err != nil { - return nil, err - } - return result, nil -} - -// ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger, options *ExpandOptions) error { - resolver, err := defaultSchemaLoader(spec, options, nil, nil) - // Just in case this ever returns an error. 
- if resolver.shouldStopOnError(err) { - return err - } - - // getting the base path of the spec to adjust all subsequent reference resolutions - specBasePath := "" - if options != nil && options.RelativeBase != "" { - specBasePath, _ = absPath(options.RelativeBase) - } - - if options == nil || !options.SkipSchemas { - for key, definition := range spec.Definitions { - var def *Schema - var err error - if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - if def != nil { - spec.Definitions[key] = *def - } - } - } - - for key := range spec.Parameters { - parameter := spec.Parameters[key] - if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Parameters[key] = parameter - } - - for key := range spec.Responses { - response := spec.Responses[key] - if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Responses[key] = response - } - - if spec.Paths != nil { - for key := range spec.Paths.Paths { - path := spec.Paths.Paths[key] - if err := expandPathItem(&path, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Paths.Paths[key] = path - } - } - - return nil -} - -// baseForRoot loads in the cache the root document and produces a fake "root" base path entry -// for further $ref resolution -func baseForRoot(root interface{}, cache ResolutionCache) string { - // cache the root document to resolve $ref's - const rootBase = "root" - if root != nil { - base, _ := absPath(rootBase) - normalizedBase := normalizeAbsPath(base) - debugLog("setting root doc in cache at: %s", normalizedBase) - if cache == nil { - cache = resCache - } - cache.Set(normalizedBase, root) - return rootBase - } - return "" -} - -// ExpandSchema expands the refs in the schema object with reference to the root object -// go-openapi/validate uses this function -// notice that it is impossible to reference a json schema in a different file other than root -func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - opts := &ExpandOptions{ - // when a root is specified, cache the root as an in-memory document for $ref retrieval - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - // when no base path is specified, remaining $ref (circular) are rendered with an absolute path - AbsoluteCircularRef: true, - } - return ExpandSchemaWithBasePath(schema, cache, opts) -} - -// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options -func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { - if schema == nil { - return nil - } - - var basePath string - if opts.RelativeBase != "" { - basePath, _ = absPath(opts.RelativeBase) - } - - resolver, err := defaultSchemaLoader(nil, opts, cache, nil) - if err != nil { - return err - } - - refs := []string{""} - var s *Schema - if s, err = expandSchema(*schema, refs, resolver, basePath); err != nil { - return err - } - *schema = *s - return nil -} - -func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Items != nil { - if target.Items.Schema != nil { - t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - *target.Items.Schema = *t 
- } - for i := range target.Items.Schemas { - t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - target.Items.Schemas[i] = *t - } - } - return &target, nil -} - -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Ref.String() == "" && target.Ref.IsRoot() { - // normalizing is important - newRef := normalizeFileRef(&target.Ref, basePath) - target.Ref = *newRef - return &target, nil - - } - - // change the base path of resolution when an ID is encountered - // otherwise the basePath should inherit the parent's - // important: ID can be relative path - if target.ID != "" { - debugLog("schema has ID: %s", target.ID) - // handling the case when id is a folder - // remember that basePath has to be a file - refPath := target.ID - if strings.HasSuffix(target.ID, "/") { - // path.Clean here would not work correctly if basepath is http - refPath = fmt.Sprintf("%s%s", refPath, "placeholder.json") - } - basePath = normalizePaths(refPath, basePath) - } - - var t *Schema - // if Ref is found, everything else doesn't matter - // Ref also changes the resolution scope of children expandSchema - if target.Ref.String() != "" { - // here the resolution scope is changed because a $ref was encountered - normalizedRef := normalizeFileRef(&target.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) { - // this means there is a cycle in the recursion tree: return the Ref - // - circular refs cannot be expanded. We leave them as ref. - // - denormalization means that a new local file ref is set relative to the original basePath - debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", - basePath, normalizedBasePath, normalizedRef.String()) - if !resolver.options.AbsoluteCircularRef { - target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath) - } else { - target.Ref = *normalizedRef - } - return &target, nil - } - - debugLog("basePath: %s: calling Resolve with target: %#v", basePath, target) - if err := resolver.Resolve(&target.Ref, &t, basePath); resolver.shouldStopOnError(err) { - return nil, err - } - - if t != nil { - parentRefs = append(parentRefs, normalizedRef.String()) - var err error - transitiveResolver, err := resolver.transitiveResolver(basePath, target.Ref) - if transitiveResolver.shouldStopOnError(err) { - return nil, err - } - - basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) - - return expandSchema(*t, parentRefs, transitiveResolver, basePath) - } - } - - t, err := expandItems(target, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target = *t - } - - for i := range target.AllOf { - t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - target.AllOf[i] = *t - } - for i := range target.AnyOf { - t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - target.AnyOf[i] = *t - } - for i := range target.OneOf { - t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.OneOf[i] = *t - } - } - if target.Not != nil { - t, err := expandSchema(*target.Not, 
parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.Not = *t - } - } - for k := range target.Properties { - t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.Properties[k] = *t - } - } - if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalProperties.Schema = *t - } - } - for k := range target.PatternProperties { - t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.PatternProperties[k] = *t - } - } - for k := range target.Dependencies { - if target.Dependencies[k].Schema != nil { - t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.Dependencies[k].Schema = *t - } - } - } - if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalItems.Schema = *t - } - } - for k := range target.Definitions { - t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.Definitions[k] = *t - } - } - return &target, nil -} - -func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { - if pathItem == nil { - return nil - } - - parentRefs := []string{} - if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - if pathItem.Ref.String() != "" { - var err error - resolver, err = resolver.transitiveResolver(basePath, pathItem.Ref) - if resolver.shouldStopOnError(err) { - return err - } - } - pathItem.Ref = Ref{} - - for idx := range pathItem.Parameters { - if err := expandParameterOrResponse(&(pathItem.Parameters[idx]), resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - ops := []*Operation{ - pathItem.Get, - pathItem.Head, - pathItem.Options, - pathItem.Put, - pathItem.Post, - pathItem.Patch, - pathItem.Delete, - } - for _, op := range ops { - if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - return nil -} - -func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { - if op == nil { - return nil - } - - for i := range op.Parameters { - param := op.Parameters[i] - if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - op.Parameters[i] = param - } - - if op.Responses != nil { - responses := op.Responses - if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - for code := range responses.StatusCodeResponses { - response := responses.StatusCodeResponses[code] - if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - responses.StatusCodeResponses[code] = response - } - 
} - return nil -} - -// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document -func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - // when no base path is specified, remaining $ref (circular) are rendered with an absolute path - AbsoluteCircularRef: true, - } - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return err - } - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandResponse expands a response based on a basepath -// This is the exported version of expandResponse -// all refs inside response will be resolved relative to basePath -func ExpandResponse(response *Response, basePath string) error { - var specBasePath string - if basePath != "" { - specBasePath, _ = absPath(basePath) - } - opts := &ExpandOptions{ - RelativeBase: specBasePath, - } - resolver, err := defaultSchemaLoader(nil, opts, nil, nil) - if err != nil { - return err - } - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document -func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - // when no base path is specified, remaining $ref (circular) are rendered with an absolute path - AbsoluteCircularRef: true, - } - resolver, err := defaultSchemaLoader(root, opts, nil, nil) - if err != nil { - return err - } - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -// ExpandParameter expands a parameter based on a basepath. -// This is the exported version of expandParameter -// all refs inside parameter will be resolved relative to basePath -func ExpandParameter(parameter *Parameter, basePath string) error { - var specBasePath string - if basePath != "" { - specBasePath, _ = absPath(basePath) - } - opts := &ExpandOptions{ - RelativeBase: specBasePath, - } - resolver, err := defaultSchemaLoader(nil, opts, nil, nil) - if err != nil { - return err - } - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { - var ref *Ref - var sch *Schema - switch refable := input.(type) { - case *Parameter: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - case *Response: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - default: - return nil, nil, fmt.Errorf("expand: unsupported type %T. 
Input should be of type *Parameter or *Response", input) - } - return ref, sch, nil -} - -func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { - ref, _, err := getRefAndSchema(input) - if err != nil { - return err - } - if ref == nil { - return nil - } - parentRefs := []string{} - if err := resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - ref, sch, _ := getRefAndSchema(input) - if ref.String() != "" { - transitiveResolver, err := resolver.transitiveResolver(basePath, *ref) - if transitiveResolver.shouldStopOnError(err) { - return err - } - basePath = resolver.updateBasePath(transitiveResolver, basePath) - resolver = transitiveResolver - } - - if sch != nil && sch.Ref.String() != "" { - // schema expanded to a $ref in another root - var ern error - sch.Ref, ern = NewRef(normalizePaths(sch.Ref.String(), ref.RemoteURI())) - if ern != nil { - return ern - } - } - if ref != nil { - *ref = Ref{} - } - - if !resolver.options.SkipSchemas && sch != nil { - s, err := expandSchema(*sch, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return err - } - *sch = *s - } - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/external_docs.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/external_docs.go deleted file mode 100644 index 88add91..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/external_docs.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ExternalDocumentation allows referencing an external resource for -// extended documentation. 
-// -// For more information: http://goo.gl/8us55a#externalDocumentationObject -type ExternalDocumentation struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/go.mod b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/go.mod deleted file mode 100644 index 5af64c1..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/go.mod +++ /dev/null @@ -1,16 +0,0 @@ -module github.com/go-openapi/spec - -require ( - github.com/PuerkitoBio/purell v1.1.0 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-openapi/jsonpointer v0.17.0 - github.com/go-openapi/jsonreference v0.17.0 - github.com/go-openapi/swag v0.17.0 - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - golang.org/x/net v0.0.0-20181005035420-146acd28ed58 // indirect - golang.org/x/text v0.3.0 // indirect - gopkg.in/yaml.v2 v2.2.1 -) diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/go.sum b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/go.sum deleted file mode 100644 index ab6bfb6..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/go.sum +++ /dev/null @@ -1,22 +0,0 @@ -github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-openapi/jsonpointer v0.17.0 h1:Bpl2DtZ6k7wKqfFs7e+4P08+M9I3FQgn09a1UsRUQbk= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.17.0 h1:d/o7/fsLWWQZACbihvZxcyLQ59jfUVs7WOJv/ak7T7A= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/swag v0.17.0 h1:7wu+dZ5k83kvUWeAb+WUkFiUhDzwGqzTR/NhWzeo1JU= -github.com/go-openapi/swag v0.17.0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git 
a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/header.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/header.go deleted file mode 100644 index 39efe45..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/header.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - jsonArray = "array" -) - -// HeaderProps describes a response header -type HeaderProps struct { - Description string `json:"description,omitempty"` -} - -// Header describes a header for a response of the API -// -// For more information: http://goo.gl/8us55a#headerObject -type Header struct { - CommonValidations - SimpleSchema - VendorExtensible - HeaderProps -} - -// ResponseHeader creates a new header instance for use in a response -func ResponseHeader() *Header { - return new(Header) -} - -// WithDescription sets the description on this response, allows for chaining -func (h *Header) WithDescription(description string) *Header { - h.Description = description - return h -} - -// Typed a fluent builder method for the type of parameter -func (h *Header) Typed(tpe, format string) *Header { - h.Type = tpe - h.Format = format - return h -} - -// CollectionOf a fluent builder method for an array item -func (h *Header) CollectionOf(items *Items, format string) *Header { - h.Type = jsonArray - h.Items = items - h.CollectionFormat = format - return h -} - -// WithDefault sets the default value on this item -func (h *Header) WithDefault(defaultValue interface{}) *Header { - h.Default = defaultValue - return h -} - -// WithMaxLength sets a max length value -func (h *Header) WithMaxLength(max int64) *Header { - h.MaxLength = &max - return h -} - -// WithMinLength sets a min length value -func (h *Header) WithMinLength(min int64) *Header { - h.MinLength = &min - return h -} - -// WithPattern sets a pattern value -func (h *Header) WithPattern(pattern string) *Header { - h.Pattern = pattern - return h -} - -// WithMultipleOf sets a multiple of value -func (h *Header) WithMultipleOf(number float64) *Header { - h.MultipleOf = &number - return h -} - -// WithMaximum sets a maximum number value -func (h *Header) WithMaximum(max float64, exclusive bool) *Header { - h.Maximum = &max - h.ExclusiveMaximum = exclusive - return h -} - -// WithMinimum sets a minimum number value -func (h *Header) WithMinimum(min float64, exclusive bool) *Header { - h.Minimum = &min - h.ExclusiveMinimum = exclusive - return h -} - -// WithEnum sets a the enum values (replace) -func (h *Header) WithEnum(values ...interface{}) *Header { - h.Enum = append([]interface{}{}, values...) 
- return h -} - -// WithMaxItems sets the max items -func (h *Header) WithMaxItems(size int64) *Header { - h.MaxItems = &size - return h -} - -// WithMinItems sets the min items -func (h *Header) WithMinItems(size int64) *Header { - h.MinItems = &size - return h -} - -// UniqueValues dictates that this array can only have unique items -func (h *Header) UniqueValues() *Header { - h.UniqueItems = true - return h -} - -// AllowDuplicates this array can have duplicates -func (h *Header) AllowDuplicates() *Header { - h.UniqueItems = false - return h -} - -// MarshalJSON marshal this to JSON -func (h Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// UnmarshalJSON unmarshals this header from JSON -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &h.HeaderProps) -} - -// JSONLookup look up a value by the json property name -func (h Header) JSONLookup(token string) (interface{}, error) { - if ex, ok := h.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) - return r, err -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/info.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/info.go deleted file mode 100644 index c458b49..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/info.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Extensions vendor specific extensions -type Extensions map[string]interface{} - -// Add adds a value to these extensions -func (e Extensions) Add(key string, value interface{}) { - realKey := strings.ToLower(key) - e[realKey] = value -} - -// GetString gets a string value from the extensions -func (e Extensions) GetString(key string) (string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(string) - return str, ok - } - return "", false -} - -// GetBool gets a string value from the extensions -func (e Extensions) GetBool(key string) (bool, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(bool) - return str, ok - } - return false, false -} - -// GetStringSlice gets a string value from the extensions -func (e Extensions) GetStringSlice(key string) ([]string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - arr, isSlice := v.([]interface{}) - if !isSlice { - return nil, false - } - var strs []string - for _, iface := range arr { - str, isString := iface.(string) - if !isString { - return nil, false - } - strs = append(strs, str) - } - return strs, ok - } - return nil, false -} - -// VendorExtensible composition block. -type VendorExtensible struct { - Extensions Extensions -} - -// AddExtension adds an extension to this extensible object -func (v *VendorExtensible) AddExtension(key string, value interface{}) { - if value == nil { - return - } - if v.Extensions == nil { - v.Extensions = make(map[string]interface{}) - } - v.Extensions.Add(key, value) -} - -// MarshalJSON marshals the extensions to json -func (v VendorExtensible) MarshalJSON() ([]byte, error) { - toser := make(map[string]interface{}) - for k, v := range v.Extensions { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - toser[k] = v - } - } - return json.Marshal(toser) -} - -// UnmarshalJSON for this extensible object -func (v *VendorExtensible) UnmarshalJSON(data []byte) error { - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if v.Extensions == nil { - v.Extensions = map[string]interface{}{} - } - v.Extensions[k] = vv - } - } - return nil -} - -// InfoProps the properties for an info definition -type InfoProps struct { - Description string `json:"description,omitempty"` - Title string `json:"title,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Contact *ContactInfo `json:"contact,omitempty"` - License *License `json:"license,omitempty"` - Version string `json:"version,omitempty"` -} - -// Info object provides metadata about the API. -// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. 
-// -// For more information: http://goo.gl/8us55a#infoObject -type Info struct { - VendorExtensible - InfoProps -} - -// JSONLookup look up a value by the json property name -func (i Info) JSONLookup(token string) (interface{}, error) { - if ex, ok := i.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(i.InfoProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (i Info) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.InfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (i *Info) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &i.InfoProps); err != nil { - return err - } - return json.Unmarshal(data, &i.VendorExtensible) -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/items.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/items.go deleted file mode 100644 index 7838931..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/items.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - jsonRef = "$ref" -) - -// SimpleSchema describe swagger simple schemas for parameters and headers -type SimpleSchema struct { - Type string `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Items *Items `json:"items,omitempty"` - CollectionFormat string `json:"collectionFormat,omitempty"` - Default interface{} `json:"default,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// TypeName return the type (or format) of a simple schema -func (s *SimpleSchema) TypeName() string { - if s.Format != "" { - return s.Format - } - return s.Type -} - -// ItemsTypeName yields the type of items in a simple schema array -func (s *SimpleSchema) ItemsTypeName() string { - if s.Items == nil { - return "" - } - return s.Items.TypeName() -} - -// CommonValidations describe common JSON-schema validations -type CommonValidations struct { - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` -} - -// Items a limited subset of JSON-Schema's items object. -// It is used by parameter definitions that are not located in "body". 
-// -// For more information: http://goo.gl/8us55a#items-object -type Items struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible -} - -// NewItems creates a new instance of items -func NewItems() *Items { - return &Items{} -} - -// Typed a fluent builder method for the type of item -func (i *Items) Typed(tpe, format string) *Items { - i.Type = tpe - i.Format = format - return i -} - -// CollectionOf a fluent builder method for an array item -func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = jsonArray - i.Items = items - i.CollectionFormat = format - return i -} - -// WithDefault sets the default value on this item -func (i *Items) WithDefault(defaultValue interface{}) *Items { - i.Default = defaultValue - return i -} - -// WithMaxLength sets a max length value -func (i *Items) WithMaxLength(max int64) *Items { - i.MaxLength = &max - return i -} - -// WithMinLength sets a min length value -func (i *Items) WithMinLength(min int64) *Items { - i.MinLength = &min - return i -} - -// WithPattern sets a pattern value -func (i *Items) WithPattern(pattern string) *Items { - i.Pattern = pattern - return i -} - -// WithMultipleOf sets a multiple of value -func (i *Items) WithMultipleOf(number float64) *Items { - i.MultipleOf = &number - return i -} - -// WithMaximum sets a maximum number value -func (i *Items) WithMaximum(max float64, exclusive bool) *Items { - i.Maximum = &max - i.ExclusiveMaximum = exclusive - return i -} - -// WithMinimum sets a minimum number value -func (i *Items) WithMinimum(min float64, exclusive bool) *Items { - i.Minimum = &min - i.ExclusiveMinimum = exclusive - return i -} - -// WithEnum sets a the enum values (replace) -func (i *Items) WithEnum(values ...interface{}) *Items { - i.Enum = append([]interface{}{}, values...) 
- return i -} - -// WithMaxItems sets the max items -func (i *Items) WithMaxItems(size int64) *Items { - i.MaxItems = &size - return i -} - -// WithMinItems sets the min items -func (i *Items) WithMinItems(size int64) *Items { - i.MinItems = &size - return i -} - -// UniqueValues dictates that this array can only have unique items -func (i *Items) UniqueValues() *Items { - i.UniqueItems = true - return i -} - -// AllowDuplicates this array can have duplicates -func (i *Items) AllowDuplicates() *Items { - i.UniqueItems = false - return i -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (i *Items) UnmarshalJSON(data []byte) error { - var validations CommonValidations - if err := json.Unmarshal(data, &validations); err != nil { - return err - } - var ref Refable - if err := json.Unmarshal(data, &ref); err != nil { - return err - } - var simpleSchema SimpleSchema - if err := json.Unmarshal(data, &simpleSchema); err != nil { - return err - } - var vendorExtensible VendorExtensible - if err := json.Unmarshal(data, &vendorExtensible); err != nil { - return err - } - i.Refable = ref - i.CommonValidations = validations - i.SimpleSchema = simpleSchema - i.VendorExtensible = vendorExtensible - return nil -} - -// MarshalJSON converts this items object to JSON -func (i Items) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(i.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b4, b3, b1, b2), nil -} - -// JSONLookup look up a value by the json property name -func (i Items) JSONLookup(token string) (interface{}, error) { - if token == jsonRef { - return &i.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) - return r, err -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/license.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/license.go deleted file mode 100644 index f20961b..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/license.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// License information for the exposed API. 
-// -// For more information: http://goo.gl/8us55a#licenseObject -type License struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/normalizer.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/normalizer.go deleted file mode 100644 index b8957e7..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/normalizer.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "fmt" - "net/url" - "os" - "path" - "path/filepath" - "strings" -) - -// normalize absolute path for cache. -// on Windows, drive letters should be converted to lower as scheme in net/url.URL -func normalizeAbsPath(path string) string { - u, err := url.Parse(path) - if err != nil { - debugLog("normalize absolute path failed: %s", err) - return path - } - return u.String() -} - -// base or refPath could be a file path or a URL -// given a base absolute path and a ref path, return the absolute path of refPath -// 1) if refPath is absolute, return it -// 2) if refPath is relative, join it with basePath keeping the scheme, hosts, and ports if exists -// base could be a directory or a full file path -func normalizePaths(refPath, base string) string { - refURL, _ := url.Parse(refPath) - if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) { - // refPath is actually absolute - if refURL.Host != "" { - return refPath - } - parts := strings.Split(refPath, "#") - result := filepath.FromSlash(parts[0]) - if len(parts) == 2 { - result += "#" + parts[1] - } - return result - } - - // relative refPath - baseURL, _ := url.Parse(base) - if !strings.HasPrefix(refPath, "#") { - // combining paths - if baseURL.Host != "" { - baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) - } else { // base is a file - newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment) - return newBase - } - - } - // copying fragment from ref to base - baseURL.Fragment = refURL.Fragment - return baseURL.String() -} - -// denormalizePaths returns to simplest notation on file $ref, -// i.e. strips the absolute path and sets a path relative to the base path. -// -// This is currently used when we rewrite ref after a circular ref has been detected -func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref { - debugLog("denormalizeFileRef for: %s", ref.String()) - - if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { - return ref - } - // strip relativeBase from URI - relativeBaseURL, _ := url.Parse(relativeBase) - relativeBaseURL.Fragment = "" - - if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) { - // this should work for absolute URI (e.g. 
http://...): we have an exact match, just trim prefix - r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase)) - return &r - } - - if relativeBaseURL.IsAbs() { - // other absolute URL get unchanged (i.e. with a non-empty scheme) - return ref - } - - // for relative file URIs: - originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) - originalRelativeBaseURL.Fragment = "" - if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) { - // the resulting ref is in the expanded spec: return a local ref - r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String())) - return &r - } - - // check if we may set a relative path, considering the original base path for this spec. - // Example: - // spec is located at /mypath/spec.json - // my normalized ref points to: /mypath/item.json#/target - // expected result: item.json#/target - parts := strings.Split(ref.String(), "#") - relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0]) - if err != nil { - // there is no common ancestor (e.g. different drives on windows) - // leaves the ref unchanged - return ref - } - if len(parts) == 2 { - relativePath += "#" + parts[1] - } - r, _ := NewRef(relativePath) - return &r -} - -// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL -func normalizeFileRef(ref *Ref, relativeBase string) *Ref { - // This is important for when the reference is pointing to the root schema - if ref.String() == "" { - r, _ := NewRef(relativeBase) - return &r - } - - debugLog("normalizing %s against %s", ref.String(), relativeBase) - - s := normalizePaths(ref.String(), relativeBase) - r, _ := NewRef(s) - return &r -} - -// absPath returns the absolute path of a file -func absPath(fname string) (string, error) { - if strings.HasPrefix(fname, "http") { - return fname, nil - } - if filepath.IsAbs(fname) { - return fname, nil - } - wd, err := os.Getwd() - return filepath.Join(wd, fname), err -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/operation.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/operation.go deleted file mode 100644 index b1ebd59..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/operation.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "sort" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -func init() { - //gob.Register(map[string][]interface{}{}) - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) -} - -// OperationProps describes an operation -// -// NOTES: -// - schemes, when present must be from [http, https, ws, wss]: see validate -// - Security is handled as a special case: see MarshalJSON function -type OperationProps struct { - Description string `json:"description,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Tags []string `json:"tags,omitempty"` - Summary string `json:"summary,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - ID string `json:"operationId,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` - Responses *Responses `json:"responses,omitempty"` -} - -// MarshalJSON takes care of serializing operation properties to JSON -// -// We use a custom marhaller here to handle a special cases related to -// the Security field. We need to preserve zero length slice -// while omitting the field when the value is nil/unset. -func (op OperationProps) MarshalJSON() ([]byte, error) { - type Alias OperationProps - if op.Security == nil { - return json.Marshal(&struct { - Security []map[string][]string `json:"security,omitempty"` - *Alias - }{ - Security: op.Security, - Alias: (*Alias)(&op), - }) - } - return json.Marshal(&struct { - Security []map[string][]string `json:"security"` - *Alias - }{ - Security: op.Security, - Alias: (*Alias)(&op), - }) -} - -// Operation describes a single API operation on a path. -// -// For more information: http://goo.gl/8us55a#operationObject -type Operation struct { - VendorExtensible - OperationProps -} - -// SuccessResponse gets a success response model -func (o *Operation) SuccessResponse() (*Response, int, bool) { - if o.Responses == nil { - return nil, 0, false - } - - responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) - for k := range o.Responses.StatusCodeResponses { - if k >= 200 && k < 300 { - responseCodes = append(responseCodes, k) - } - } - if len(responseCodes) > 0 { - sort.Ints(responseCodes) - v := o.Responses.StatusCodeResponses[responseCodes[0]] - return &v, responseCodes[0], true - } - - return o.Responses.Default, 0, false -} - -// JSONLookup look up a value by the json property name -func (o Operation) JSONLookup(token string) (interface{}, error) { - if ex, ok := o.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(o.OperationProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (o *Operation) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &o.OperationProps); err != nil { - return err - } - return json.Unmarshal(data, &o.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (o Operation) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(o.OperationProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(o.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// NewOperation creates a new operation instance. 
-// It expects an ID as parameter but not passing an ID is also valid. -func NewOperation(id string) *Operation { - op := new(Operation) - op.ID = id - return op -} - -// WithID sets the ID property on this operation, allows for chaining. -func (o *Operation) WithID(id string) *Operation { - o.ID = id - return o -} - -// WithDescription sets the description on this operation, allows for chaining -func (o *Operation) WithDescription(description string) *Operation { - o.Description = description - return o -} - -// WithSummary sets the summary on this operation, allows for chaining -func (o *Operation) WithSummary(summary string) *Operation { - o.Summary = summary - return o -} - -// WithExternalDocs sets/removes the external docs for/from this operation. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. -func (o *Operation) WithExternalDocs(description, url string) *Operation { - if description == "" && url == "" { - o.ExternalDocs = nil - return o - } - - if o.ExternalDocs == nil { - o.ExternalDocs = &ExternalDocumentation{} - } - o.ExternalDocs.Description = description - o.ExternalDocs.URL = url - return o -} - -// Deprecate marks the operation as deprecated -func (o *Operation) Deprecate() *Operation { - o.Deprecated = true - return o -} - -// Undeprecate marks the operation as not deprected -func (o *Operation) Undeprecate() *Operation { - o.Deprecated = false - return o -} - -// WithConsumes adds media types for incoming body values -func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { - o.Consumes = append(o.Consumes, mediaTypes...) - return o -} - -// WithProduces adds media types for outgoing body values -func (o *Operation) WithProduces(mediaTypes ...string) *Operation { - o.Produces = append(o.Produces, mediaTypes...) - return o -} - -// WithTags adds tags for this operation -func (o *Operation) WithTags(tags ...string) *Operation { - o.Tags = append(o.Tags, tags...) - return o -} - -// AddParam adds a parameter to this operation, when a parameter for that location -// and with that name already exists it will be replaced -func (o *Operation) AddParam(param *Parameter) *Operation { - if param == nil { - return o - } - - for i, p := range o.Parameters { - if p.Name == param.Name && p.In == param.In { - params := append(o.Parameters[:i], *param) - params = append(params, o.Parameters[i+1:]...) - o.Parameters = params - return o - } - } - - o.Parameters = append(o.Parameters, *param) - return o -} - -// RemoveParam removes a parameter from the operation -func (o *Operation) RemoveParam(name, in string) *Operation { - for i, p := range o.Parameters { - if p.Name == name && p.In == in { - o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) - return o - } - } - return o -} - -// SecuredWith adds a security scope to this operation. -func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { - o.Security = append(o.Security, map[string][]string{name: scopes}) - return o -} - -// WithDefaultResponse adds a default response to the operation. -// Passing a nil value will remove the response -func (o *Operation) WithDefaultResponse(response *Response) *Operation { - return o.RespondsWith(0, response) -} - -// RespondsWith adds a status code response to the operation. 
-// When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation -func (o *Operation) RespondsWith(code int, response *Response) *Operation { - if o.Responses == nil { - o.Responses = new(Responses) - } - if code == 0 { - o.Responses.Default = response - return o - } - if response == nil { - delete(o.Responses.StatusCodeResponses, code) - return o - } - if o.Responses.StatusCodeResponses == nil { - o.Responses.StatusCodeResponses = make(map[int]Response) - } - o.Responses.StatusCodeResponses[code] = *response - return o -} - -type opsAlias OperationProps - -type gobAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *opsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (o Operation) GobEncode() ([]byte, error) { - raw := struct { - Ext VendorExtensible - Props OperationProps - }{ - Ext: o.VendorExtensible, - Props: o.OperationProps, - } - var b bytes.Buffer - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (o *Operation) GobDecode(b []byte) error { - var raw struct { - Ext VendorExtensible - Props OperationProps - } - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - o.VendorExtensible = raw.Ext - o.OperationProps = raw.Props - return nil -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (op OperationProps) GobEncode() ([]byte, error) { - raw := gobAlias{ - Alias: (*opsAlias)(&op), - } - - var b bytes.Buffer - if op.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(op.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(op.Security)) - for _, req := range op.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (op *OperationProps) GobDecode(b []byte) error { - var raw gobAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = append(v[k], val.List...) 
- } - raw.Alias.Security = append(raw.Alias.Security, v) - } - } - - *op = *(*OperationProps)(raw.Alias) - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/parameter.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/parameter.go deleted file mode 100644 index cecdff5..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/parameter.go +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// QueryParam creates a query parameter -func QueryParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} -} - -// HeaderParam creates a header parameter, this is always required by default -func HeaderParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} -} - -// PathParam creates a path parameter, this is always required -func PathParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} -} - -// BodyParam creates a body parameter -func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}, - SimpleSchema: SimpleSchema{Type: "object"}} -} - -// FormDataParam creates a body parameter -func FormDataParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} -} - -// FileParam creates a body parameter -func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, - SimpleSchema: SimpleSchema{Type: "file"}} -} - -// SimpleArrayParam creates a param for a simple array (string, int, date etc) -func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, - SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", - Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}} -} - -// ParamRef creates a parameter that's a json reference -func ParamRef(uri string) *Parameter { - p := new(Parameter) - p.Ref = MustCreateRef(uri) - return p -} - -// ParamProps describes the specific attributes of an operation parameter -// -// NOTE: -// - Schema is defined when "in" == "body": see validate -// - AllowEmptyValue is allowed where "in" == "query" || "formData" -type ParamProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` -} - -// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). -// -// There are five possible parameter types. 
-// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. 
-// -// For more information: http://goo.gl/8us55a#parameterObject -type Parameter struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible - ParamProps -} - -// JSONLookup look up a value by the json property name -func (p Parameter) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.ParamProps, token) - return r, err -} - -// WithDescription a fluent builder method for the description of the parameter -func (p *Parameter) WithDescription(description string) *Parameter { - p.Description = description - return p -} - -// Named a fluent builder method to override the name of the parameter -func (p *Parameter) Named(name string) *Parameter { - p.Name = name - return p -} - -// WithLocation a fluent builder method to override the location of the parameter -func (p *Parameter) WithLocation(in string) *Parameter { - p.In = in - return p -} - -// Typed a fluent builder method for the type of the parameter value -func (p *Parameter) Typed(tpe, format string) *Parameter { - p.Type = tpe - p.Format = format - return p -} - -// CollectionOf a fluent builder method for an array parameter -func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = jsonArray - p.Items = items - p.CollectionFormat = format - return p -} - -// WithDefault sets the default value on this parameter -func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { - p.AsOptional() // with default implies optional - p.Default = defaultValue - return p -} - -// AllowsEmptyValues flags this parameter as being ok with empty values -func (p *Parameter) AllowsEmptyValues() *Parameter { - p.AllowEmptyValue = true - return p -} - -// NoEmptyValues flags this parameter as not liking empty values -func (p *Parameter) NoEmptyValues() *Parameter { - p.AllowEmptyValue = false - return p -} - -// AsOptional flags this parameter as optional -func (p *Parameter) AsOptional() *Parameter { - p.Required = false - return p -} - -// AsRequired flags this parameter as required -func (p *Parameter) AsRequired() *Parameter { - if p.Default != nil { // with a default required makes no sense - return p - } - p.Required = true - return p -} - -// WithMaxLength sets a max length value -func (p *Parameter) WithMaxLength(max int64) *Parameter { - p.MaxLength = &max - return p -} - -// WithMinLength sets a min length value -func (p *Parameter) WithMinLength(min int64) *Parameter { - p.MinLength = &min - return p -} - -// WithPattern sets a pattern value -func (p *Parameter) WithPattern(pattern string) *Parameter { - p.Pattern = pattern - return p -} - -// WithMultipleOf sets a multiple of value -func (p *Parameter) WithMultipleOf(number float64) *Parameter { - p.MultipleOf = &number - return p -} - -// WithMaximum sets a maximum number value -func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { - p.Maximum = &max - p.ExclusiveMaximum = exclusive - return p -} - -// WithMinimum sets a minimum number value -func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { - p.Minimum = &min - p.ExclusiveMinimum = exclusive - return p -} - -// WithEnum sets a the enum values (replace) -func (p *Parameter) WithEnum(values ...interface{}) *Parameter { - p.Enum = append([]interface{}{}, values...) - return p -} - -// WithMaxItems sets the max items -func (p *Parameter) WithMaxItems(size int64) *Parameter { - p.MaxItems = &size - return p -} - -// WithMinItems sets the min items -func (p *Parameter) WithMinItems(size int64) *Parameter { - p.MinItems = &size - return p -} - -// UniqueValues dictates that this array can only have unique items -func (p *Parameter) UniqueValues() *Parameter { - p.UniqueItems = true - return p -} - -// AllowDuplicates this array can have duplicates -func (p *Parameter) AllowDuplicates() *Parameter { - p.UniqueItems = false - return p -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Parameter) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.ParamProps) -} - -// MarshalJSON converts this items object to JSON -func (p Parameter) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.ParamProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2, b4, b5), nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/path_item.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/path_item.go deleted file mode 100644 index 68fc8e9..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/path_item.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// PathItemProps the path item specific properties -type PathItemProps struct { - Get *Operation `json:"get,omitempty"` - Put *Operation `json:"put,omitempty"` - Post *Operation `json:"post,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Options *Operation `json:"options,omitempty"` - Head *Operation `json:"head,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` -} - -// PathItem describes the operations available on a single path. -// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
-// The path itself is still exposed to the documentation viewer but they will -// not know which operations and parameters are available. -// -// For more information: http://goo.gl/8us55a#pathItemObject -type PathItem struct { - Refable - VendorExtensible - PathItemProps -} - -// JSONLookup look up a value by the json property name -func (p PathItem) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *PathItem) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.PathItemProps) -} - -// MarshalJSON converts this items object to JSON -func (p PathItem) MarshalJSON() ([]byte, error) { - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.PathItemProps) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b3, b4, b5) - return concated, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/paths.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/paths.go deleted file mode 100644 index 9dc82a2..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/paths.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/go-openapi/swag" -) - -// Paths holds the relative paths to the individual endpoints. -// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order -// to construct the full URL. -// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
-// -// For more information: http://goo.gl/8us55a#pathsObject -type Paths struct { - VendorExtensible - Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" -} - -// JSONLookup look up a value by the json property name -func (p Paths) JSONLookup(token string) (interface{}, error) { - if pi, ok := p.Paths[token]; ok { - return &pi, nil - } - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]PathItem) - } - var pi PathItem - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - - pths := make(map[string]PathItem) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/ref.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/ref.go deleted file mode 100644 index 08ff869..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/ref.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "net/http" - "os" - "path/filepath" - - "github.com/go-openapi/jsonreference" -) - -// Refable is a struct for things that accept a $ref property -type Refable struct { - Ref Ref -} - -// MarshalJSON marshals the ref to json -func (r Refable) MarshalJSON() ([]byte, error) { - return r.Ref.MarshalJSON() -} - -// UnmarshalJSON unmarshalss the ref from json -func (r *Refable) UnmarshalJSON(d []byte) error { - return json.Unmarshal(d, &r.Ref) -} - -// Ref represents a json reference that is potentially resolved -type Ref struct { - jsonreference.Ref -} - -// RemoteURI gets the remote uri part of the ref -func (r *Ref) RemoteURI() string { - if r.String() == "" { - return r.String() - } - - u := *r.GetURL() - u.Fragment = "" - return u.String() -} - -// IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI(basepaths ...string) bool { - if r.String() == "" { - return true - } - - v := r.RemoteURI() - if v == "" { - return true - } - - if r.HasFullURL { - rr, err := http.Get(v) - if err != nil { - return false - } - - return rr.StatusCode/100 == 2 - } - - if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { - return false - } - - // check for local file - pth := v - if r.HasURLPathOnly { - base := "." - if len(basepaths) > 0 { - base = filepath.Dir(filepath.Join(basepaths...)) - } - p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) - if e != nil { - return false - } - pth = p - } - - fi, err := os.Stat(filepath.ToSlash(pth)) - if err != nil { - return false - } - - return !fi.IsDir() -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - ref, err := r.Ref.Inherits(child.Ref) - if err != nil { - return nil, err - } - return &Ref{Ref: *ref}, nil -} - -// NewRef creates a new instance of a ref object -// returns an error when the reference uri is an invalid uri -func NewRef(refURI string) (Ref, error) { - ref, err := jsonreference.New(refURI) - if err != nil { - return Ref{}, err - } - return Ref{Ref: ref}, nil -} - -// MustCreateRef creates a ref object but panics when refURI is invalid. -// Use the NewRef method for a version that returns an error. 
-func MustCreateRef(refURI string) Ref { - return Ref{Ref: jsonreference.MustCreateRef(refURI)} -} - -// MarshalJSON marshals this ref into a JSON object -func (r Ref) MarshalJSON() ([]byte, error) { - str := r.String() - if str == "" { - if r.IsRoot() { - return []byte(`{"$ref":""}`), nil - } - return []byte("{}"), nil - } - v := map[string]interface{}{"$ref": str} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshals this ref from a JSON object -func (r *Ref) UnmarshalJSON(d []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(d, &v); err != nil { - return err - } - return r.fromMap(v) -} - -// GobEncode provides a safe gob encoder for Ref -func (r Ref) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw, err := r.MarshalJSON() - if err != nil { - return nil, err - } - err = gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Ref -func (r *Ref) GobDecode(b []byte) error { - var raw []byte - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - return json.Unmarshal(raw, r) -} - -func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/response.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/response.go deleted file mode 100644 index 27729c1..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/response.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// ResponseProps properties specific to a response -type ResponseProps struct { - Description string `json:"description,omitempty"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` -} - -// Response describes a single response from an API Operation. 
-// -// For more information: http://goo.gl/8us55a#responseObject -type Response struct { - Refable - ResponseProps - VendorExtensible -} - -// JSONLookup look up a value by the json property name -func (r Response) JSONLookup(token string) (interface{}, error) { - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &r.Ref, nil - } - ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) - return ptr, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - return json.Unmarshal(data, &r.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (r Response) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponseProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// NewResponse creates a new response instance -func NewResponse() *Response { - return new(Response) -} - -// ResponseRef creates a response as a json reference -func ResponseRef(url string) *Response { - resp := NewResponse() - resp.Ref = MustCreateRef(url) - return resp -} - -// WithDescription sets the description on this response, allows for chaining -func (r *Response) WithDescription(description string) *Response { - r.Description = description - return r -} - -// WithSchema sets the schema on this response, allows for chaining. -// Passing a nil argument removes the schema from this response -func (r *Response) WithSchema(schema *Schema) *Response { - r.Schema = schema - return r -} - -// AddHeader adds a header to this response -func (r *Response) AddHeader(name string, header *Header) *Response { - if header == nil { - return r.RemoveHeader(name) - } - if r.Headers == nil { - r.Headers = make(map[string]Header) - } - r.Headers[name] = *header - return r -} - -// RemoveHeader removes a header from this response -func (r *Response) RemoveHeader(name string) *Response { - delete(r.Headers, name) - return r -} - -// AddExample adds an example to this response -func (r *Response) AddExample(mediaType string, example interface{}) *Response { - if r.Examples == nil { - r.Examples = make(map[string]interface{}) - } - r.Examples[mediaType] = example - return r -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/responses.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/responses.go deleted file mode 100644 index 4efb6f8..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/responses.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - "github.com/go-openapi/swag" -) - -// Responses is a container for the expected responses of an operation. -// The container maps a HTTP response code to the expected response. -// It is not expected from the documentation to necessarily cover all possible HTTP response codes, -// since they may not be known in advance. However, it is expected from the documentation to cover -// a successful operation response and any known errors. -// -// The `default` can be used a default response object for all HTTP codes that are not covered -// individually by the specification. -// -// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response -// for a successful operation call. -// -// For more information: http://goo.gl/8us55a#responsesObject -type Responses struct { - VendorExtensible - ResponsesProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (r Responses) JSONLookup(token string) (interface{}, error) { - if token == "default" { - return r.Default, nil - } - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if i, err := strconv.Atoi(token); err == nil { - if scr, ok := r.StatusCodeResponses[i]; ok { - return scr, nil - } - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Responses) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { - r.ResponsesProps = ResponsesProps{} - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Responses) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponsesProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// ResponsesProps describes all responses for an operation. -// It tells what is the default response and maps all responses with a -// HTTP status code. 
-type ResponsesProps struct { - Default *Response - StatusCodeResponses map[int]Response -} - -// MarshalJSON marshals responses as JSON -func (r ResponsesProps) MarshalJSON() ([]byte, error) { - toser := map[string]Response{} - if r.Default != nil { - toser["default"] = *r.Default - } - for k, v := range r.StatusCodeResponses { - toser[strconv.Itoa(k)] = v - } - return json.Marshal(toser) -} - -// UnmarshalJSON unmarshals responses from JSON -func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response - if err := json.Unmarshal(data, &res); err != nil { - return nil - } - if v, ok := res["default"]; ok { - r.Default = &v - delete(res, "default") - } - for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} - } - r.StatusCodeResponses[nk] = v - } - } - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/schema.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/schema.go deleted file mode 100644 index ce30d26..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/schema.go +++ /dev/null @@ -1,589 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// BooleanProperty creates a boolean property -func BooleanProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} -} - -// BoolProperty creates a boolean property -func BoolProperty() *Schema { return BooleanProperty() } - -// StringProperty creates a string property -func StringProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// CharProperty creates a string property -func CharProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// Float64Property creates a float64/double property -func Float64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} -} - -// Float32Property creates a float32/float property -func Float32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} -} - -// Int8Property creates an int8 property -func Int8Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} -} - -// Int16Property creates an int16 property -func Int16Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} -} - -// Int32Property creates an int32 property -func Int32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} -} - -// Int64Property creates an int64 property -func Int64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} -} - -// StrFmtProperty creates a property for the named string format -func StrFmtProperty(format string) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} -} - -// DateProperty creates a date property -func DateProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} -} - -// DateTimeProperty creates a date time property -func DateTimeProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} -} - -// MapProperty creates a map property -func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} -} - -// RefProperty creates a ref property -func RefProperty(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// RefSchema creates a ref property -func RefSchema(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// ArrayProperty creates an array property -func ArrayProperty(items *Schema) *Schema { - if items == nil { - return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} - } - return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} -} - -// ComposedSchema creates a schema with allOf -func ComposedSchema(schemas ...Schema) *Schema { - s := new(Schema) - s.AllOf = schemas - return s -} - -// SchemaURL represents a schema url -type SchemaURL string - -// MarshalJSON marshal this to JSON -func (r SchemaURL) MarshalJSON() ([]byte, error) { - if r == "" { - return []byte("{}"), nil - } - v := map[string]interface{}{"$schema": string(r)} - return json.Marshal(v) -} - -// 
UnmarshalJSON unmarshal this from JSON -func (r *SchemaURL) UnmarshalJSON(data []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - return r.fromMap(v) -} - -func (r *SchemaURL) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - if vv, ok := v["$schema"]; ok { - if str, ok := vv.(string); ok { - u, err := url.Parse(str) - if err != nil { - return err - } - - *r = SchemaURL(u.String()) - } - } - return nil -} - -// SchemaProps describes a JSON schema (draft 4) -type SchemaProps struct { - ID string `json:"id,omitempty"` - Ref Ref `json:"-"` - Schema SchemaURL `json:"-"` - Description string `json:"description,omitempty"` - Type StringOrArray `json:"type,omitempty"` - Format string `json:"format,omitempty"` - Title string `json:"title,omitempty"` - Default interface{} `json:"default,omitempty"` - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` - Items *SchemaOrArray `json:"items,omitempty"` - AllOf []Schema `json:"allOf,omitempty"` - OneOf []Schema `json:"oneOf,omitempty"` - AnyOf []Schema `json:"anyOf,omitempty"` - Not *Schema `json:"not,omitempty"` - Properties map[string]Schema `json:"properties,omitempty"` - AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` - PatternProperties map[string]Schema `json:"patternProperties,omitempty"` - Dependencies Dependencies `json:"dependencies,omitempty"` - AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` - Definitions Definitions `json:"definitions,omitempty"` -} - -// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) -type SwaggerSchemaProps struct { - Discriminator string `json:"discriminator,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - XML *XMLObject `json:"xml,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// Schema the schema object allows the definition of input and output data types. -// These types can be objects, but also primitives and arrays. -// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) -// and uses a predefined subset of it. -// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
-// -// For more information: http://goo.gl/8us55a#schemaObject -type Schema struct { - VendorExtensible - SchemaProps - SwaggerSchemaProps - ExtraProps map[string]interface{} `json:"-"` -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s Schema) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - if ex, ok := s.ExtraProps[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { - return r, err - } - r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) - return r, err -} - -// WithID sets the id for this schema, allows for chaining -func (s *Schema) WithID(id string) *Schema { - s.ID = id - return s -} - -// WithTitle sets the title for this schema, allows for chaining -func (s *Schema) WithTitle(title string) *Schema { - s.Title = title - return s -} - -// WithDescription sets the description for this schema, allows for chaining -func (s *Schema) WithDescription(description string) *Schema { - s.Description = description - return s -} - -// WithProperties sets the properties for this schema -func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { - s.Properties = schemas - return s -} - -// SetProperty sets a property on this schema -func (s *Schema) SetProperty(name string, schema Schema) *Schema { - if s.Properties == nil { - s.Properties = make(map[string]Schema) - } - s.Properties[name] = schema - return s -} - -// WithAllOf sets the all of property -func (s *Schema) WithAllOf(schemas ...Schema) *Schema { - s.AllOf = schemas - return s -} - -// WithMaxProperties sets the max number of properties an object can have -func (s *Schema) WithMaxProperties(max int64) *Schema { - s.MaxProperties = &max - return s -} - -// WithMinProperties sets the min number of properties an object must have -func (s *Schema) WithMinProperties(min int64) *Schema { - s.MinProperties = &min - return s -} - -// Typed sets the type of this schema for a single value item -func (s *Schema) Typed(tpe, format string) *Schema { - s.Type = []string{tpe} - s.Format = format - return s -} - -// AddType adds a type with potential format to the types for this schema -func (s *Schema) AddType(tpe, format string) *Schema { - s.Type = append(s.Type, tpe) - if format != "" { - s.Format = format - } - return s -} - -// CollectionOf a fluent builder method for an array parameter -func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{jsonArray} - s.Items = &SchemaOrArray{Schema: &items} - return s -} - -// WithDefault sets the default value on this parameter -func (s *Schema) WithDefault(defaultValue interface{}) *Schema { - s.Default = defaultValue - return s -} - -// WithRequired flags this parameter as required -func (s *Schema) WithRequired(items ...string) *Schema { - s.Required = items - return s -} - -// AddRequired adds field names to the required properties array -func (s *Schema) AddRequired(items ...string) *Schema { - s.Required = append(s.Required, items...) 
- return s -} - -// WithMaxLength sets a max length value -func (s *Schema) WithMaxLength(max int64) *Schema { - s.MaxLength = &max - return s -} - -// WithMinLength sets a min length value -func (s *Schema) WithMinLength(min int64) *Schema { - s.MinLength = &min - return s -} - -// WithPattern sets a pattern value -func (s *Schema) WithPattern(pattern string) *Schema { - s.Pattern = pattern - return s -} - -// WithMultipleOf sets a multiple of value -func (s *Schema) WithMultipleOf(number float64) *Schema { - s.MultipleOf = &number - return s -} - -// WithMaximum sets a maximum number value -func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { - s.Maximum = &max - s.ExclusiveMaximum = exclusive - return s -} - -// WithMinimum sets a minimum number value -func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { - s.Minimum = &min - s.ExclusiveMinimum = exclusive - return s -} - -// WithEnum sets a the enum values (replace) -func (s *Schema) WithEnum(values ...interface{}) *Schema { - s.Enum = append([]interface{}{}, values...) - return s -} - -// WithMaxItems sets the max items -func (s *Schema) WithMaxItems(size int64) *Schema { - s.MaxItems = &size - return s -} - -// WithMinItems sets the min items -func (s *Schema) WithMinItems(size int64) *Schema { - s.MinItems = &size - return s -} - -// UniqueValues dictates that this array can only have unique items -func (s *Schema) UniqueValues() *Schema { - s.UniqueItems = true - return s -} - -// AllowDuplicates this array can have duplicates -func (s *Schema) AllowDuplicates() *Schema { - s.UniqueItems = false - return s -} - -// AddToAllOf adds a schema to the allOf property -func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { - s.AllOf = append(s.AllOf, schemas...) - return s -} - -// WithDiscriminator sets the name of the discriminator field -func (s *Schema) WithDiscriminator(discriminator string) *Schema { - s.Discriminator = discriminator - return s -} - -// AsReadOnly flags this schema as readonly -func (s *Schema) AsReadOnly() *Schema { - s.ReadOnly = true - return s -} - -// AsWritable flags this schema as writeable (not read-only) -func (s *Schema) AsWritable() *Schema { - s.ReadOnly = false - return s -} - -// WithExample sets the example for this schema -func (s *Schema) WithExample(example interface{}) *Schema { - s.Example = example - return s -} - -// WithExternalDocs sets/removes the external docs for/from this schema. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. 
-func (s *Schema) WithExternalDocs(description, url string) *Schema { - if description == "" && url == "" { - s.ExternalDocs = nil - return s - } - - if s.ExternalDocs == nil { - s.ExternalDocs = &ExternalDocumentation{} - } - s.ExternalDocs.Description = description - s.ExternalDocs.URL = url - return s -} - -// WithXMLName sets the xml name for the object -func (s *Schema) WithXMLName(name string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Name = name - return s -} - -// WithXMLNamespace sets the xml namespace for the object -func (s *Schema) WithXMLNamespace(namespace string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Namespace = namespace - return s -} - -// WithXMLPrefix sets the xml prefix for the object -func (s *Schema) WithXMLPrefix(prefix string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Prefix = prefix - return s -} - -// AsXMLAttribute flags this object as xml attribute -func (s *Schema) AsXMLAttribute() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = true - return s -} - -// AsXMLElement flags this object as an xml node -func (s *Schema) AsXMLElement() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = false - return s -} - -// AsWrappedXML flags this object as wrapped, this is mostly useful for array types -func (s *Schema) AsWrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = true - return s -} - -// AsUnwrappedXML flags this object as an xml node -func (s *Schema) AsUnwrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = false - return s -} - -// MarshalJSON marshal this to JSON -func (s Schema) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SchemaProps) - if err != nil { - return nil, fmt.Errorf("schema props %v", err) - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, fmt.Errorf("vendor props %v", err) - } - b3, err := s.Ref.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("ref prop %v", err) - } - b4, err := s.Schema.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("schema prop %v", err) - } - b5, err := json.Marshal(s.SwaggerSchemaProps) - if err != nil { - return nil, fmt.Errorf("common validations %v", err) - } - var b6 []byte - if s.ExtraProps != nil { - jj, err := json.Marshal(s.ExtraProps) - if err != nil { - return nil, fmt.Errorf("extra props %v", err) - } - b6 = jj - } - return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *Schema) UnmarshalJSON(data []byte) error { - props := struct { - SchemaProps - SwaggerSchemaProps - }{} - if err := json.Unmarshal(data, &props); err != nil { - return err - } - - sch := Schema{ - SchemaProps: props.SchemaProps, - SwaggerSchemaProps: props.SwaggerSchemaProps, - } - - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - _ = sch.Ref.fromMap(d) - _ = sch.Schema.fromMap(d) - - delete(d, "$ref") - delete(d, "$schema") - for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { - delete(d, pn) - } - - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if sch.Extensions == nil { - sch.Extensions = map[string]interface{}{} - } - sch.Extensions[k] = vv - continue - } - if sch.ExtraProps == nil { - sch.ExtraProps = map[string]interface{}{} - } - sch.ExtraProps[k] = vv - } - - *s = sch - - return nil -} diff --git 
a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/schema_loader.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/schema_loader.go deleted file mode 100644 index c34a96f..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/schema_loader.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "reflect" - "strings" - - "github.com/go-openapi/swag" -) - -// PathLoader function to use when loading remote refs -var PathLoader func(string) (json.RawMessage, error) - -func init() { - PathLoader = func(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil - } -} - -// resolverContext allows to share a context during spec processing. -// At the moment, it just holds the index of circular references found. -type resolverContext struct { - // circulars holds all visited circular references, which allows shortcuts. - // NOTE: this is not just a performance improvement: it is required to figure out - // circular references which participate several cycles. - // This structure is privately instantiated and needs not be locked against - // concurrent access, unless we chose to implement a parallel spec walking. 
- circulars map[string]bool - basePath string -} - -func newResolverContext(originalBasePath string) *resolverContext { - return &resolverContext{ - circulars: make(map[string]bool), - basePath: originalBasePath, // keep the root base path in context - } -} - -type schemaLoader struct { - root interface{} - options *ExpandOptions - cache ResolutionCache - context *resolverContext - loadDoc func(string) (json.RawMessage, error) -} - -func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) { - if ref.IsRoot() || ref.HasFragmentOnly { - return r, nil - } - - baseRef, _ := NewRef(basePath) - currentRef := normalizeFileRef(&ref, basePath) - if strings.HasPrefix(currentRef.String(), baseRef.String()) { - return r, nil - } - - // Set a new root to resolve against - rootURL := currentRef.GetURL() - rootURL.Fragment = "" - root, _ := r.cache.Get(rootURL.String()) - - // shallow copy of resolver options to set a new RelativeBase when - // traversing multiple documents - newOptions := r.options - newOptions.RelativeBase = rootURL.String() - debugLog("setting new root: %s", newOptions.RelativeBase) - resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context) - if err != nil { - return nil, err - } - - return resolver, nil -} - -func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { - if transitive != r { - debugLog("got a new resolver") - if transitive.options != nil && transitive.options.RelativeBase != "" { - basePath, _ = absPath(transitive.options.RelativeBase) - debugLog("new basePath = %s", basePath) - } - } - return basePath -} - -func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return fmt.Errorf("resolve ref: target needs to be a pointer") - } - - refURL := ref.GetURL() - if refURL == nil { - return nil - } - - var res interface{} - var data interface{} - var err error - // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means - // it is pointing somewhere in the root. - root := r.root - if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { - if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) - } - } - if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { - data = root - } else { - baseRef := normalizeFileRef(ref, basePath) - debugLog("current ref is: %s", ref.String()) - debugLog("current ref normalized file: %s", baseRef.String()) - data, _, _, err = r.load(baseRef.GetURL()) - if err != nil { - return err - } - } - - res = data - if ref.String() != "" { - res, _, err = ref.GetPointer().Get(data) - if err != nil { - return err - } - } - return swag.DynamicJSONToStruct(res, target) -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - debugLog("loading schema from url: %s", refURL) - toFetch := *refURL - toFetch.Fragment = "" - - normalized := normalizeAbsPath(toFetch.String()) - - data, fromCache := r.cache.Get(normalized) - if !fromCache { - b, err := r.loadDoc(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - if err := json.Unmarshal(b, &data); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(normalized, data) - } - - return data, toFetch, fromCache, nil -} - -// isCircular detects cycles in sequences of $ref. -// It relies on a private context (which needs not be locked). 
-func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { - normalizedRef := normalizePaths(ref.String(), basePath) - if _, ok := r.context.circulars[normalizedRef]; ok { - // circular $ref has been already detected in another explored cycle - foundCycle = true - return - } - foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef) - if foundCycle { - r.context.circulars[normalizedRef] = true - } - return -} - -// Resolve resolves a reference against basePath and stores the result in target -// Resolve is not in charge of following references, it only resolves ref by following its URL -// if the schema that ref is referring to has more refs in it. Resolve doesn't resolve them -// if basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct -func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { - return r.resolveRef(ref, target, basePath) -} - -func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { - var ref *Ref - switch refable := input.(type) { - case *Schema: - ref = &refable.Ref - case *Parameter: - ref = &refable.Ref - case *Response: - ref = &refable.Ref - case *PathItem: - ref = &refable.Ref - default: - return fmt.Errorf("deref: unsupported type %T", input) - } - - curRef := ref.String() - if curRef != "" { - normalizedRef := normalizeFileRef(ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if r.isCircular(normalizedRef, basePath, parentRefs...) { - return nil - } - - if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { - return err - } - - // NOTE(fredbi): removed basePath check => needs more testing - if ref.String() != "" && ref.String() != curRef { - parentRefs = append(parentRefs, normalizedRef.String()) - return r.deref(input, parentRefs, normalizedBasePath) - } - } - - return nil -} - -func (r *schemaLoader) shouldStopOnError(err error) bool { - if err != nil && !r.options.ContinueOnError { - return true - } - - if err != nil { - log.Println(err) - } - - return false -} - -func defaultSchemaLoader( - root interface{}, - expandOptions *ExpandOptions, - cache ResolutionCache, - context *resolverContext) (*schemaLoader, error) { - - if cache == nil { - cache = resCache - } - if expandOptions == nil { - expandOptions = &ExpandOptions{} - } - absBase, _ := absPath(expandOptions.RelativeBase) - if context == nil { - context = newResolverContext(absBase) - } - return &schemaLoader{ - root: root, - options: expandOptions, - cache: cache, - context: context, - loadDoc: func(path string) (json.RawMessage, error) { - debugLog("fetching document at %q", path) - return PathLoader(path) - }, - }, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/security_scheme.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/security_scheme.go deleted file mode 100644 index fe35384..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/security_scheme.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - basic = "basic" - apiKey = "apiKey" - oauth2 = "oauth2" - implicit = "implicit" - password = "password" - application = "application" - accessCode = "accessCode" -) - -// BasicAuth creates a basic auth security scheme -func BasicAuth() *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} -} - -// APIKeyAuth creates an api key auth security scheme -func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} -} - -// OAuth2Implicit creates an implicit flow oauth2 security scheme -func OAuth2Implicit(authorizationURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: implicit, - AuthorizationURL: authorizationURL, - }} -} - -// OAuth2Password creates a password flow oauth2 security scheme -func OAuth2Password(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: password, - TokenURL: tokenURL, - }} -} - -// OAuth2Application creates an application flow oauth2 security scheme -func OAuth2Application(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: application, - TokenURL: tokenURL, - }} -} - -// OAuth2AccessToken creates an access token flow oauth2 security scheme -func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: accessCode, - AuthorizationURL: authorizationURL, - TokenURL: tokenURL, - }} -} - -// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section -type SecuritySchemeProps struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 -} - -// AddScope adds a scope to this security scheme -func (s *SecuritySchemeProps) AddScope(scope, description string) { - if s.Scopes == nil { - s.Scopes = make(map[string]string) - } - s.Scopes[scope] = description -} - -// SecurityScheme allows the definition of a security scheme that can be used by the operations. -// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) -// and OAuth2's common flows (implicit, password, application and access code). 
-// -// For more information: http://goo.gl/8us55a#securitySchemeObject -type SecurityScheme struct { - VendorExtensible - SecuritySchemeProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (s SecurityScheme) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecuritySchemeProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/spec.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/spec.go deleted file mode 100644 index 0bb045b..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/spec.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import "encoding/json" - -//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json -//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema -//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
-//go:generate perl -pi -e s,Json,JSON,g bindata.go - -const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs - SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema - JSONSchemaURL = "http://json-schema.org/draft-04/schema#" -) - -var ( - jsonSchema *Schema - swaggerSchema *Schema -) - -func init() { - jsonSchema = MustLoadJSONSchemaDraft04() - swaggerSchema = MustLoadSwagger20Schema() -} - -// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error -func MustLoadJSONSchemaDraft04() *Schema { - d, e := JSONSchemaDraft04() - if e != nil { - panic(e) - } - return d -} - -// JSONSchemaDraft04 loads the json schema document for json shema draft04 -func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} - -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error -func MustLoadSwagger20Schema() *Schema { - d, e := Swagger20Schema() - if e != nil { - panic(e) - } - return d -} - -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets -func Swagger20Schema() (*Schema, error) { - - b, err := Asset("v2/schema.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/swagger.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/swagger.go deleted file mode 100644 index 454617e..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/swagger.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strconv" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) -// together into one document. 
-// -// For more information: http://goo.gl/8us55a#swagger-object- -type Swagger struct { - VendorExtensible - SwaggerProps -} - -// JSONLookup look up a value by the json property name -func (s Swagger) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) - return r, err -} - -// MarshalJSON marshals this swagger structure to json -func (s Swagger) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SwaggerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON unmarshals a swagger spec from json -func (s *Swagger) UnmarshalJSON(data []byte) error { - var sw Swagger - if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { - return err - } - *s = sw - return nil -} - -// SwaggerProps captures the top-level properties of an Api specification -// -// NOTE: validation rules -// - the scheme, when present must be from [http, https, ws, wss] -// - BasePath must start with a leading "/" -// - Paths is required -type SwaggerProps struct { - ID string `json:"id,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Swagger string `json:"swagger,omitempty"` - Info *Info `json:"info,omitempty"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` - Paths *Paths `json:"paths"` - Definitions Definitions `json:"definitions,omitempty"` - Parameters map[string]Parameter `json:"parameters,omitempty"` - Responses map[string]Response `json:"responses,omitempty"` - SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Tags []Tag `json:"tags,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// Dependencies represent a dependencies property -type Dependencies map[string]SchemaOrStringArray - -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property -type SchemaOrBool struct { - Allows bool - Schema *Schema -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { - if token == "allows" { - return s.Allows, nil - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -// MarshalJSON convert this object to JSON -func (s SchemaOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -// UnmarshalJSON converts this bool or schema object from a JSON structure -func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { - var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') - } - *s = nw - return nil -} - -// SchemaOrStringArray represents a schema or a string array -type SchemaOrStringArray struct { - Schema *Schema - Property []string -} - -// 
JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return []byte("null"), nil -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - var nw SchemaOrStringArray - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -// Definitions contains the models explicitly defined in this spec -// An object to hold data types that can be consumed and produced by operations. -// These data types can be primitives, arrays or models. -// -// For more information: http://goo.gl/8us55a#definitionsObject -type Definitions map[string]Schema - -// SecurityDefinitions a declaration of the security schemes available to be used in the specification. -// This does not enforce the security schemes on the operations and only serves to provide -// the relevant details for each scheme. -// -// For more information: http://goo.gl/8us55a#securityDefinitionsObject -type SecurityDefinitions map[string]*SecurityScheme - -// StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes -type StringOrArray []string - -// Contains returns true when the value is contained in the slice -func (s StringOrArray) Contains(value string) bool { - for _, str := range s { - if str == value { - return true - } - } - return false -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { - if _, err := strconv.Atoi(token); err == nil { - r, _, err := jsonpointer.GetForToken(s.Schemas, token) - return r, err - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string -func (s *StringOrArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - - if first == '[' { - var parsed []string - if err := json.Unmarshal(data, &parsed); err != nil { - return err - } - *s = StringOrArray(parsed) - return nil - } - - var single interface{} - if err := json.Unmarshal(data, &single); err != nil { - return err - } - if single == nil { - return nil - } - switch v := single.(type) { - case string: - *s = StringOrArray([]string{v}) - return nil - default: - return fmt.Errorf("only string or array is allowed, not %T", single) - } -} - -// MarshalJSON converts this string or array to a JSON array or JSON string -func (s StringOrArray) MarshalJSON() ([]byte, error) { - if len(s) == 1 { - return json.Marshal([]string(s)[0]) - } - return json.Marshal([]string(s)) -} - -// SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. 
Mainly here for serialization purposes -type SchemaOrArray struct { - Schema *Schema - Schemas []Schema -} - -// Len returns the number of schemas in this property -func (s SchemaOrArray) Len() int { - if s.Schema != nil { - return 1 - } - return len(s.Schemas) -} - -// ContainsType returns true when one of the schemas is of the specified type -func (s *SchemaOrArray) ContainsType(name string) bool { - if s.Schema != nil { - return s.Schema.Type != nil && s.Schema.Type.Contains(name) - } - return false -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { - return json.Marshal(s.Schemas) - } - return json.Marshal(s.Schema) -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { - var nw SchemaOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Schemas); err != nil { - return err - } - } - *s = nw - return nil -} - -// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/tag.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/tag.go deleted file mode 100644 index faa3d3d..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/tag.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// TagProps describe a tag entry in the top level tags section of a swagger spec -type TagProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// NewTag creates a new tag -func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} -} - -// Tag allows adding meta data to a single tag that is used by the -// [Operation Object](http://goo.gl/8us55a#operationObject). -// It is not mandatory to have a Tag Object per tag used there. 
-// -// For more information: http://goo.gl/8us55a#tagObject -type Tag struct { - VendorExtensible - TagProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (t Tag) JSONLookup(token string) (interface{}, error) { - if ex, ok := t.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(t.TagProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (t Tag) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(t.TagProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(t.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (t *Tag) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &t.TagProps); err != nil { - return err - } - return json.Unmarshal(data, &t.VendorExtensible) -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/unused.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/unused.go deleted file mode 100644 index aa12b56..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/unused.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -/* - -import ( - "net/url" - "os" - "path" - "path/filepath" - - "github.com/go-openapi/jsonpointer" -) - - // Some currently unused functions and definitions that - // used to be part of the expander. 
- - // Moved here for the record and possible future reuse - -var ( - idPtr, _ = jsonpointer.New("/id") - refPtr, _ = jsonpointer.New("/$ref") -) - -func idFromNode(node interface{}) (*Ref, error) { - if idValue, _, err := idPtr.Get(node); err == nil { - if refStr, ok := idValue.(string); ok && refStr != "" { - idRef, err := NewRef(refStr) - if err != nil { - return nil, err - } - return &idRef, nil - } - } - return nil, nil -} - -func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref { - if startingRef == nil { - return nil - } - - if ptr == nil { - return startingRef - } - - ret := startingRef - var idRef *Ref - node := startingNode - - for _, tok := range ptr.DecodedTokens() { - node, _, _ = jsonpointer.GetForToken(node, tok) - if node == nil { - break - } - - idRef, _ = idFromNode(node) - if idRef != nil { - nw, err := ret.Inherits(*idRef) - if err != nil { - break - } - ret = nw - } - - refRef, _, _ := refPtr.Get(node) - if refRef != nil { - var rf Ref - switch value := refRef.(type) { - case string: - rf, _ = NewRef(value) - } - nw, err := ret.Inherits(rf) - if err != nil { - break - } - nwURL := nw.GetURL() - if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") { - nwpt := filepath.ToSlash(nwURL.Path) - if filepath.IsAbs(nwpt) { - _, err := os.Stat(nwpt) - if err != nil { - nwURL.Path = filepath.Join(".", nwpt) - } - } - } - - ret = nw - } - - } - - return ret -} - -// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID -func basePathFromSchemaID(oldBasePath, id string) string { - u, err := url.Parse(oldBasePath) - if err != nil { - panic(err) - } - uid, err := url.Parse(id) - if err != nil { - panic(err) - } - - if path.IsAbs(uid.Path) { - return id - } - u.Path = path.Join(path.Dir(u.Path), uid.Path) - return u.String() -} -*/ - -// type ExtraSchemaProps map[string]interface{} - -// // JSONSchema represents a structure that is a json schema draft 04 -// type JSONSchema struct { -// SchemaProps -// ExtraSchemaProps -// } - -// // MarshalJSON marshal this to JSON -// func (s JSONSchema) MarshalJSON() ([]byte, error) { -// b1, err := json.Marshal(s.SchemaProps) -// if err != nil { -// return nil, err -// } -// b2, err := s.Ref.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b3, err := s.Schema.MarshalJSON() -// if err != nil { -// return nil, err -// } -// b4, err := json.Marshal(s.ExtraSchemaProps) -// if err != nil { -// return nil, err -// } -// return swag.ConcatJSON(b1, b2, b3, b4), nil -// } - -// // UnmarshalJSON marshal this from JSON -// func (s *JSONSchema) UnmarshalJSON(data []byte) error { -// var sch JSONSchema -// if err := json.Unmarshal(data, &sch.SchemaProps); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Ref); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.Schema); err != nil { -// return err -// } -// if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil { -// return err -// } -// *s = sch -// return nil -// } diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/xml_object.go b/cmd/bpa-operator/vendor/github.com/go-openapi/spec/xml_object.go deleted file mode 100644 index 945a467..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/spec/xml_object.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// XMLObject a metadata object that allows for more fine-tuned XML model definitions. -// -// For more information: http://goo.gl/8us55a#xmlObject -type XMLObject struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Prefix string `json:"prefix,omitempty"` - Attribute bool `json:"attribute,omitempty"` - Wrapped bool `json:"wrapped,omitempty"` -} - -// WithName sets the xml name for the object -func (x *XMLObject) WithName(name string) *XMLObject { - x.Name = name - return x -} - -// WithNamespace sets the xml namespace for the object -func (x *XMLObject) WithNamespace(namespace string) *XMLObject { - x.Namespace = namespace - return x -} - -// WithPrefix sets the xml prefix for the object -func (x *XMLObject) WithPrefix(prefix string) *XMLObject { - x.Prefix = prefix - return x -} - -// AsAttribute flags this object as xml attribute -func (x *XMLObject) AsAttribute() *XMLObject { - x.Attribute = true - return x -} - -// AsElement flags this object as an xml node -func (x *XMLObject) AsElement() *XMLObject { - x.Attribute = false - return x -} - -// AsWrapped flags this object as wrapped, this is mostly useful for array types -func (x *XMLObject) AsWrapped() *XMLObject { - x.Wrapped = true - return x -} - -// AsUnwrapped flags this object as an xml node -func (x *XMLObject) AsUnwrapped() *XMLObject { - x.Wrapped = false - return x -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.editorconfig b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.editorconfig deleted file mode 100644 index 3152da6..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.gitignore b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.gitignore deleted file mode 100644 index 5862205..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -secrets.yml -vendor -Godeps diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.golangci.yml b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.golangci.yml deleted file mode 100644 index 625c3d6..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.golangci.yml +++ /dev/null @@ -1,22 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 25 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 3 - min-occurrences: 2 - -linters: - enable-all: 
true - disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.travis.yml b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.travis.yml deleted file mode 100644 index bd3a2e5..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- '1.9' -- 1.10.x -- 1.11.x -install: -- go get -u github.com/stretchr/testify -- go get -u github.com/mailru/easyjson -- go get -u gopkg.in/yaml.v2 -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- go test -v -race -cover -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b06..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. 
- -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/LICENSE b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/LICENSE deleted file mode 100644 index d645695..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/README.md b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/README.md deleted file mode 100644 index 459a3e1..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/swag.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag) - -Contains a bunch of helper functions for go-openapi and go-swagger projects. - -You may also use it standalone for your projects. - -* convert between value and pointers for builtin types -* convert from string to builtin types (wraps strconv) -* fast json concatenation -* search in path -* load from file or http -* name mangling - - -This repo has only few dependencies outside of the standard library: - -* JSON utilities depend on github.com/mailru/easyjson -* YAML utilities depend on gopkg.in/yaml.v2 diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/convert.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/convert.go deleted file mode 100644 index 4e446ff..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/convert.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "math" - "strconv" - "strings" -) - -// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER -const ( - maxJSONFloat = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 - minJSONFloat = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 - epsilon float64 = 1e-9 -) - -// IsFloat64AJSONInteger allow for integers [-2^53, 2^53-1] inclusive -func IsFloat64AJSONInteger(f float64) bool { - if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat { - return false - } - fa := math.Abs(f) - g := float64(uint64(f)) - ga := math.Abs(g) - - diff := math.Abs(f - g) - - // more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases - if f == g { // best case - return true - } else if f == float64(int64(f)) || f == float64(uint64(f)) { // optimistic case - return true - } else if f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64 { // very close to 0 values - return diff < (epsilon * math.SmallestNonzeroFloat64) - } - // check the relative error - return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon -} - -var evaluatesAsTrue map[string]struct{} - -func init() { - evaluatesAsTrue = map[string]struct{}{ - "true": {}, - "1": {}, - "yes": {}, - "ok": {}, - "y": {}, - "on": {}, - "selected": {}, - "checked": {}, - "t": {}, - "enabled": {}, - } -} - -// ConvertBool turn a string into a boolean -func ConvertBool(str string) (bool, error) { - _, ok := evaluatesAsTrue[strings.ToLower(str)] - return ok, nil -} - -// ConvertFloat32 turn a string into a float32 -func ConvertFloat32(str string) (float32, error) { - f, err := strconv.ParseFloat(str, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// ConvertFloat64 turn a string into a float64 -func ConvertFloat64(str string) (float64, error) { - return strconv.ParseFloat(str, 64) -} - -// ConvertInt8 turn a string into int8 boolean -func ConvertInt8(str string) (int8, error) { - i, err := strconv.ParseInt(str, 10, 8) - if err != nil { - return 0, err - } - return int8(i), nil -} - -// ConvertInt16 turn a string into a int16 -func ConvertInt16(str string) (int16, error) { - i, err := strconv.ParseInt(str, 10, 16) - if err != nil { - return 0, err - } - return int16(i), nil -} - -// ConvertInt32 turn a string into a int32 -func ConvertInt32(str string) (int32, error) { - i, err := strconv.ParseInt(str, 10, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// ConvertInt64 turn a string into a int64 -func ConvertInt64(str string) (int64, error) { - return strconv.ParseInt(str, 10, 64) -} - -// ConvertUint8 turn a string into a uint8 -func ConvertUint8(str string) (uint8, error) { - i, err := strconv.ParseUint(str, 10, 8) - if err != nil { - return 0, err - } - return uint8(i), nil -} - -// ConvertUint16 turn a string into a uint16 -func ConvertUint16(str string) (uint16, error) { - i, err := strconv.ParseUint(str, 10, 16) - if err != nil { - return 0, err - } - return uint16(i), nil -} - -// ConvertUint32 turn a string into a uint32 -func ConvertUint32(str string) (uint32, error) { - i, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// ConvertUint64 turn a string into a uint64 -func ConvertUint64(str string) (uint64, error) { - return strconv.ParseUint(str, 10, 64) -} - -// FormatBool turns a boolean into a string -func FormatBool(value bool) string { - return strconv.FormatBool(value) -} - -// FormatFloat32 turns a float32 into a string -func FormatFloat32(value float32) string 
{ - return strconv.FormatFloat(float64(value), 'f', -1, 32) -} - -// FormatFloat64 turns a float64 into a string -func FormatFloat64(value float64) string { - return strconv.FormatFloat(value, 'f', -1, 64) -} - -// FormatInt8 turns an int8 into a string -func FormatInt8(value int8) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt16 turns an int16 into a string -func FormatInt16(value int16) string { - return strconv.FormatInt(int64(value), 10) -} - -// FormatInt32 turns an int32 into a string -func FormatInt32(value int32) string { - return strconv.Itoa(int(value)) -} - -// FormatInt64 turns an int64 into a string -func FormatInt64(value int64) string { - return strconv.FormatInt(value, 10) -} - -// FormatUint8 turns an uint8 into a string -func FormatUint8(value uint8) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint16 turns an uint16 into a string -func FormatUint16(value uint16) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint32 turns an uint32 into a string -func FormatUint32(value uint32) string { - return strconv.FormatUint(uint64(value), 10) -} - -// FormatUint64 turns an uint64 into a string -func FormatUint64(value uint64) string { - return strconv.FormatUint(value, 10) -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/convert_types.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/convert_types.go deleted file mode 100644 index c95e4e7..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/convert_types.go +++ /dev/null @@ -1,595 +0,0 @@ -package swag - -import "time" - -// This file was taken from the aws go sdk - -// String returns a pointer to of the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. -func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to of the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. 
-func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to of the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. -func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int32 returns a pointer to of the int64 value passed in. -func Int32(v int32) *int32 { - return &v -} - -// Int32Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. 
-func Int32Value(v *int32) int32 { - if v != nil { - return *v - } - return 0 -} - -// Int32Slice converts a slice of int64 values into a slice of -// int32 pointers -func Int32Slice(src []int32) []*int32 { - dst := make([]*int32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int32ValueSlice converts a slice of int32 pointers into a slice of -// int32 values -func Int32ValueSlice(src []*int32) []int32 { - dst := make([]int32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int32Map converts a string map of int32 values into a string -// map of int32 pointers -func Int32Map(src map[string]int32) map[string]*int32 { - dst := make(map[string]*int32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int32ValueMap converts a string map of int32 pointers into a string -// map of int32 values -func Int32ValueMap(src map[string]*int32) map[string]int32 { - dst := make(map[string]int32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to of the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint returns a pouinter to of the uint value passed in. -func Uint(v uint) *uint { - return &v -} - -// UintValue returns the value of the uint pouinter passed in or -// 0 if the pouinter is nil. 
-func UintValue(v *uint) uint { - if v != nil { - return *v - } - return 0 -} - -// UintSlice converts a slice of uint values uinto a slice of -// uint pouinters -func UintSlice(src []uint) []*uint { - dst := make([]*uint, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// UintValueSlice converts a slice of uint pouinters uinto a slice of -// uint values -func UintValueSlice(src []*uint) []uint { - dst := make([]uint, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// UintMap converts a string map of uint values uinto a string -// map of uint pouinters -func UintMap(src map[string]uint) map[string]*uint { - dst := make(map[string]*uint) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// UintValueMap converts a string map of uint pouinters uinto a string -// map of uint values -func UintValueMap(src map[string]*uint) map[string]uint { - dst := make(map[string]uint) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint32 returns a pouinter to of the uint64 value passed in. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint32Value returns the value of the uint64 pouinter passed in or -// 0 if the pouinter is nil. -func Uint32Value(v *uint32) uint32 { - if v != nil { - return *v - } - return 0 -} - -// Uint32Slice converts a slice of uint64 values uinto a slice of -// uint32 pouinters -func Uint32Slice(src []uint32) []*uint32 { - dst := make([]*uint32, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint32ValueSlice converts a slice of uint32 pouinters uinto a slice of -// uint32 values -func Uint32ValueSlice(src []*uint32) []uint32 { - dst := make([]uint32, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint32Map converts a string map of uint32 values uinto a string -// map of uint32 pouinters -func Uint32Map(src map[string]uint32) map[string]*uint32 { - dst := make(map[string]*uint32) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint32ValueMap converts a string map of uint32 pouinters uinto a string -// map of uint32 values -func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { - dst := make(map[string]uint32) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Uint64 returns a pouinter to of the uint64 value passed in. -func Uint64(v uint64) *uint64 { - return &v -} - -// Uint64Value returns the value of the uint64 pouinter passed in or -// 0 if the pouinter is nil. 
-func Uint64Value(v *uint64) uint64 { - if v != nil { - return *v - } - return 0 -} - -// Uint64Slice converts a slice of uint64 values uinto a slice of -// uint64 pouinters -func Uint64Slice(src []uint64) []*uint64 { - dst := make([]*uint64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Uint64ValueSlice converts a slice of uint64 pouinters uinto a slice of -// uint64 values -func Uint64ValueSlice(src []*uint64) []uint64 { - dst := make([]uint64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Uint64Map converts a string map of uint64 values uinto a string -// map of uint64 pouinters -func Uint64Map(src map[string]uint64) map[string]*uint64 { - dst := make(map[string]*uint64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Uint64ValueMap converts a string map of uint64 pouinters uinto a string -// map of uint64 values -func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { - dst := make(map[string]uint64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to of the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. -func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to of the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. 
-func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/doc.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/doc.go deleted file mode 100644 index e01e1a0..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/doc.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package swag contains a bunch of helper functions for go-openapi and go-swagger projects. - -You may also use it standalone for your projects. 
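For reference, a minimal sketch of the pointer/value helpers deleted in convert_types.go above, assuming only the public github.com/go-openapi/swag module (the module path declared in the deleted go.mod); the example values are illustrative, not taken from this repository:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// Take the address of a literal and read it back safely.
	name := swag.String("bpa-operator") // *string
	fmt.Println(swag.StringValue(name)) // "bpa-operator"
	fmt.Println(swag.StringValue(nil))  // "" for a nil pointer

	// Convert between value slices and pointer slices.
	counts := swag.Int64Slice([]int64{1, 2, 3}) // []*int64
	fmt.Println(swag.Int64ValueSlice(counts))   // [1 2 3]
}
```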
- - * convert between value and pointers for builtin types - * convert from string to builtin types (wraps strconv) - * fast json concatenation - * search in path - * load from file or http - * name mangling - - -This repo has only few dependencies outside of the standard library: - - * JSON utilities depend on github.com/mailru/easyjson - * YAML utilities depend on gopkg.in/yaml.v2 -*/ -package swag diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/go.mod b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/go.mod deleted file mode 100644 index 9eb936a..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/go-openapi/swag - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/testify v1.2.2 - gopkg.in/yaml.v2 v2.2.1 -) diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/go.sum b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/go.sum deleted file mode 100644 index d6e717b..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/json.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/json.go deleted file mode 100644 index 62ab15e..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/json.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package swag - -import ( - "bytes" - "encoding/json" - "log" - "reflect" - "strings" - "sync" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// nullJSON represents a JSON object with null type -var nullJSON = []byte("null") - -// DefaultJSONNameProvider the default cache for types -var DefaultJSONNameProvider = NewNameProvider() - -const comma = byte(',') - -var closers map[byte]byte - -func init() { - closers = map[byte]byte{ - '{': '}', - '[': ']', - } -} - -type ejMarshaler interface { - MarshalEasyJSON(w *jwriter.Writer) -} - -type ejUnmarshaler interface { - UnmarshalEasyJSON(w *jlexer.Lexer) -} - -// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller -// so it takes the fastest option available. -func WriteJSON(data interface{}) ([]byte, error) { - if d, ok := data.(ejMarshaler); ok { - jw := new(jwriter.Writer) - d.MarshalEasyJSON(jw) - return jw.BuildBytes() - } - if d, ok := data.(json.Marshaler); ok { - return d.MarshalJSON() - } - return json.Marshal(data) -} - -// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller -// so it takes the fastes option available -func ReadJSON(data []byte, value interface{}) error { - trimmedData := bytes.Trim(data, "\x00") - if d, ok := value.(ejUnmarshaler); ok { - jl := &jlexer.Lexer{Data: trimmedData} - d.UnmarshalEasyJSON(jl) - return jl.Error() - } - if d, ok := value.(json.Unmarshaler); ok { - return d.UnmarshalJSON(trimmedData) - } - return json.Unmarshal(trimmedData, value) -} - -// DynamicJSONToStruct converts an untyped json structure into a struct -func DynamicJSONToStruct(data interface{}, target interface{}) error { - // TODO: convert straight to a json typed map (mergo + iterate?) - b, err := WriteJSON(data) - if err != nil { - return err - } - return ReadJSON(b, target) -} - -// ConcatJSON concatenates multiple json objects efficiently -func ConcatJSON(blobs ...[]byte) []byte { - if len(blobs) == 0 { - return nil - } - - last := len(blobs) - 1 - for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { - // strips trailing null objects - last = last - 1 - if last < 0 { - // there was nothing but "null"s or nil... - return nil - } - } - if last == 0 { - return blobs[0] - } - - var opening, closing byte - var idx, a int - buf := bytes.NewBuffer(nil) - - for i, b := range blobs[:last+1] { - if b == nil || bytes.Equal(b, nullJSON) { - // a null object is in the list: skip it - continue - } - if len(b) > 0 && opening == 0 { // is this an array or an object? 
- opening, closing = b[0], closers[b[0]] - } - - if opening != '{' && opening != '[' { - continue // don't know how to concatenate non container objects - } - - if len(b) < 3 { // yep empty but also the last one, so closing this thing - if i == last && a > 0 { - if err := buf.WriteByte(closing); err != nil { - log.Println(err) - } - } - continue - } - - idx = 0 - if a > 0 { // we need to join with a comma for everything beyond the first non-empty item - if err := buf.WriteByte(comma); err != nil { - log.Println(err) - } - idx = 1 // this is not the first or the last so we want to drop the leading bracket - } - - if i != last { // not the last one, strip brackets - if _, err := buf.Write(b[idx : len(b)-1]); err != nil { - log.Println(err) - } - } else { // last one, strip only the leading bracket - if _, err := buf.Write(b[idx:]); err != nil { - log.Println(err) - } - } - a++ - } - // somehow it ended up being empty, so provide a default value - if buf.Len() == 0 { - if err := buf.WriteByte(opening); err != nil { - log.Println(err) - } - if err := buf.WriteByte(closing); err != nil { - log.Println(err) - } - } - return buf.Bytes() -} - -// ToDynamicJSON turns an object into a properly JSON typed structure -func ToDynamicJSON(data interface{}) interface{} { - // TODO: convert straight to a json typed map (mergo + iterate?) - b, err := json.Marshal(data) - if err != nil { - log.Println(err) - } - var res interface{} - if err := json.Unmarshal(b, &res); err != nil { - log.Println(err) - } - return res -} - -// FromDynamicJSON turns an object into a properly JSON typed structure -func FromDynamicJSON(data, target interface{}) error { - b, err := json.Marshal(data) - if err != nil { - log.Println(err) - } - return json.Unmarshal(b, target) -} - -// NameProvider represents an object capabale of translating from go property names -// to json property names -// This type is thread-safe. 
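A hedged sketch of ConcatJSON and ReadJSON from the deleted json.go, again via the public go-openapi/swag module; as the removed code shows, ConcatJSON joins object blobs by stripping the inner braces and inserting commas while skipping nil or "null" entries:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	merged := swag.ConcatJSON(
		[]byte(`{"name":"bpa-operator"}`),
		[]byte(`{"replicas":1}`),
	)
	fmt.Println(string(merged)) // {"name":"bpa-operator","replicas":1}

	// ReadJSON prefers an easyjson fast path when the target implements it
	// and falls back to encoding/json otherwise.
	var out map[string]interface{}
	if err := swag.ReadJSON(merged, &out); err != nil {
		panic(err)
	}
	fmt.Println(out["name"], out["replicas"])
}
```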
-type NameProvider struct { - lock *sync.Mutex - index map[reflect.Type]nameIndex -} - -type nameIndex struct { - jsonNames map[string]string - goNames map[string]string -} - -// NewNameProvider creates a new name provider -func NewNameProvider() *NameProvider { - return &NameProvider{ - lock: &sync.Mutex{}, - index: make(map[reflect.Type]nameIndex), - } -} - -func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) { - for i := 0; i < tpe.NumField(); i++ { - targetDes := tpe.Field(i) - - if targetDes.PkgPath != "" { // unexported - continue - } - - if targetDes.Anonymous { // walk embedded structures tree down first - buildnameIndex(targetDes.Type, idx, reverseIdx) - continue - } - - if tag := targetDes.Tag.Get("json"); tag != "" { - - parts := strings.Split(tag, ",") - if len(parts) == 0 { - continue - } - - nm := parts[0] - if nm == "-" { - continue - } - if nm == "" { // empty string means we want to use the Go name - nm = targetDes.Name - } - - idx[nm] = targetDes.Name - reverseIdx[targetDes.Name] = nm - } - } -} - -func newNameIndex(tpe reflect.Type) nameIndex { - var idx = make(map[string]string, tpe.NumField()) - var reverseIdx = make(map[string]string, tpe.NumField()) - - buildnameIndex(tpe, idx, reverseIdx) - return nameIndex{jsonNames: idx, goNames: reverseIdx} -} - -// GetJSONNames gets all the json property names for a type -func (n *NameProvider) GetJSONNames(subject interface{}) []string { - n.lock.Lock() - defer n.lock.Unlock() - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - - res := make([]string, 0, len(names.jsonNames)) - for k := range names.jsonNames { - res = append(res, k) - } - return res -} - -// GetJSONName gets the json name for a go property name -func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetJSONNameForType(tpe, name) -} - -// GetJSONNameForType gets the json name for a go property name on a given type -func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) { - n.lock.Lock() - defer n.lock.Unlock() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - nme, ok := names.goNames[name] - return nme, ok -} - -func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex { - names := newNameIndex(tpe) - n.index[tpe] = names - return names -} - -// GetGoName gets the go name for a json property name -func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) { - tpe := reflect.Indirect(reflect.ValueOf(subject)).Type() - return n.GetGoNameForType(tpe, name) -} - -// GetGoNameForType gets the go name for a given type for a json property name -func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) { - n.lock.Lock() - defer n.lock.Unlock() - names, ok := n.index[tpe] - if !ok { - names = n.makeNameIndex(tpe) - } - nme, ok := names.jsonNames[name] - return nme, ok -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/loading.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/loading.go deleted file mode 100644 index 70f4fb3..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/loading.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "path/filepath" - "strings" - "time" -) - -// LoadHTTPTimeout the default timeout for load requests -var LoadHTTPTimeout = 30 * time.Second - -// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in -func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) -} - -// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in -// timeout arg allows for per request overriding of the request timeout -func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) -} - -// LoadStrategy returns a loader function for a given path or uri -func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) { - if strings.HasPrefix(path, "http") { - return remote - } - return func(pth string) ([]byte, error) { - upth, err := pathUnescape(pth) - if err != nil { - return nil, err - } - return local(filepath.FromSlash(upth)) - } -} - -func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { - return func(path string) ([]byte, error) { - client := &http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", path, nil) - if err != nil { - return nil, err - } - resp, err := client.Do(req) - defer func() { - if resp != nil { - if e := resp.Body.Close(); e != nil { - log.Println(e) - } - } - }() - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) - } - - return ioutil.ReadAll(resp.Body) - } -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/net.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/net.go deleted file mode 100644 index 821235f..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/net.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "net" - "strconv" -) - -// SplitHostPort splits a network address into a host and a port. 
-// The port is -1 when there is no port to be found -func SplitHostPort(addr string) (host string, port int, err error) { - h, p, err := net.SplitHostPort(addr) - if err != nil { - return "", -1, err - } - if p == "" { - return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr} - } - - pi, err := strconv.Atoi(p) - if err != nil { - return "", -1, err - } - return h, pi, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/path.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/path.go deleted file mode 100644 index 941bd01..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/path.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "os" - "path/filepath" - "runtime" - "strings" -) - -const ( - // GOPATHKey represents the env key for gopath - GOPATHKey = "GOPATH" -) - -// FindInSearchPath finds a package in a provided lists of paths -func FindInSearchPath(searchPath, pkg string) string { - pathsList := filepath.SplitList(searchPath) - for _, path := range pathsList { - if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil { - if _, err := os.Stat(evaluatedPath); err == nil { - return evaluatedPath - } - } - } - return "" -} - -// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT -func FindInGoSearchPath(pkg string) string { - return FindInSearchPath(FullGoSearchPath(), pkg) -} - -// FullGoSearchPath gets the search paths for finding packages -func FullGoSearchPath() string { - allPaths := os.Getenv(GOPATHKey) - if allPaths == "" { - allPaths = filepath.Join(os.Getenv("HOME"), "go") - } - if allPaths != "" { - allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":") - } else { - allPaths = runtime.GOROOT() - } - return allPaths -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/post_go18.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/post_go18.go deleted file mode 100644 index c2e686d..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/post_go18.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.PathUnescape(path) -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/post_go19.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/post_go19.go deleted file mode 100644 index eb2f2d8..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/post_go19.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Since go1.9, this may be implemented with sync.Map. -type indexOfInitialisms struct { - sortMutex *sync.Mutex - index *sync.Map -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - sortMutex: new(sync.Mutex), - index: new(sync.Map), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - for k, v := range initial { - m.index.Store(k, v) - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - _, ok := m.index.Load(key) - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.index.Store(key, true) - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.sortMutex.Lock() - defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { - k := key.(string) - result = append(result, k) - return true - }) - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/pre_go18.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/pre_go18.go deleted file mode 100644 index 6607f33..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/pre_go18.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !go1.8 - -package swag - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/pre_go19.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/pre_go19.go deleted file mode 100644 index 4bae187..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/pre_go19.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.9 - -package swag - -import ( - "sort" - "sync" -) - -// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. -// Before go1.9, this may be implemented with a mutex on the map. -type indexOfInitialisms struct { - getMutex *sync.Mutex - index map[string]bool -} - -func newIndexOfInitialisms() *indexOfInitialisms { - return &indexOfInitialisms{ - getMutex: new(sync.Mutex), - index: make(map[string]bool, 50), - } -} - -func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k, v := range initial { - m.index[k] = v - } - return m -} - -func (m *indexOfInitialisms) isInitialism(key string) bool { - m.getMutex.Lock() - defer m.getMutex.Unlock() - _, ok := m.index[key] - return ok -} - -func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { - m.getMutex.Lock() - defer m.getMutex.Unlock() - m.index[key] = true - return m -} - -func (m *indexOfInitialisms) sorted() (result []string) { - m.getMutex.Lock() - defer m.getMutex.Unlock() - for k := range m.index { - result = append(result, k) - } - sort.Sort(sort.Reverse(byInitialism(result))) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/util.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/util.go deleted file mode 100644 index 87c54df..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/util.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package swag - -import ( - "math" - "reflect" - "regexp" - "strings" - "sync" - "unicode" -) - -// commonInitialisms are common acronyms that are kept as whole uppercased words. 
-var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var once sync.Once - -var isInitialism func(string) bool - -var ( - splitRex1 *regexp.Regexp - splitRex2 *regexp.Regexp - splitReplacer *strings.Replacer -) - -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, - } - - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - - // a test function - isInitialism = commonInitialisms.isInitialism -} - -func ensureSorted() { - initialisms = commonInitialisms.sorted() -} - -const ( - //collectionFormatComma = "csv" - collectionFormatSpace = "ssv" - collectionFormatTab = "tsv" - collectionFormatPipe = "pipes" - collectionFormatMulti = "multi" -) - -// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute): -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -func JoinByFormat(data []string, format string) []string { - if len(data) == 0 { - return data - } - var sep string - switch format { - case collectionFormatSpace: - sep = " " - case collectionFormatTab: - sep = "\t" - case collectionFormatPipe: - sep = "|" - case collectionFormatMulti: - return data - default: - sep = "," - } - return []string{strings.Join(data, sep)} -} - -// SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) -// -func SplitByFormat(data, format string) []string { - if data == "" { - return nil - } - var sep string - switch format { - case collectionFormatSpace: - sep = " " - case collectionFormatTab: - sep = "\t" - case collectionFormatPipe: - sep = "|" - case collectionFormatMulti: - return nil - default: - sep = "," - } - var result []string - for _, s := range strings.Split(data, sep) { - if ts := strings.TrimSpace(s); ts != "" { - result = append(result, ts) - } - } - return result -} - -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - -// Prepares strings by splitting by caps, spaces, dashes, and underscore -func split(str string) []string { - // check if consecutive single char things make up an initialism - once.Do(func() { - splitRex1 = regexp.MustCompile(`(\p{Lu})`) - splitRex2 = regexp.MustCompile(`(\pL|\pM|\pN|\p{Pc})+`) - splitReplacer = strings.NewReplacer( - "@", "At ", - "&", "And ", - "|", "Pipe ", - "$", "Dollar ", - "!", "Bang ", - "-", " ", - "_", " ", - ) - 
ensureSorted() - }) - - str = trim(str) - - // Convert dash and underscore to spaces - str = splitReplacer.Replace(str) - - // Split when uppercase is found (needed for Snake) - str = splitRex1.ReplaceAllString(str, " $1") - - for _, k := range initialisms { - str = strings.Replace(str, splitRex1.ReplaceAllString(k, " $1"), " "+k, -1) - } - // Get the final list of words - //words = rex2.FindAllString(str, -1) - return splitRex2.FindAllString(str, -1) -} - -// Removes leading whitespaces -func trim(str string) string { - return strings.Trim(str, " ") -} - -// Shortcut to strings.ToUpper() -func upper(str string) string { - return strings.ToUpper(trim(str)) -} - -// Shortcut to strings.ToLower() -func lower(str string) string { - return strings.ToLower(trim(str)) -} - -// Camelize an uppercased word -func Camelize(word string) (camelized string) { - for pos, ru := range []rune(word) { - if pos > 0 { - camelized += string(unicode.ToLower(ru)) - } else { - camelized += string(unicode.ToUpper(ru)) - } - } - return -} - -// ToFileName lowercases and underscores a go type name -func ToFileName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - out = append(out, lower(w)) - } - - return strings.Join(out, "_") -} - -// ToCommandName lowercases and underscores a go type name -func ToCommandName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - out = append(out, lower(w)) - } - return strings.Join(out, "-") -} - -// ToHumanNameLower represents a code name as a human series of words -func ToHumanNameLower(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - if !isInitialism(upper(w)) { - out = append(out, lower(w)) - } else { - out = append(out, w) - } - } - return strings.Join(out, " ") -} - -// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized -func ToHumanNameTitle(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - uw := upper(w) - if !isInitialism(uw) { - out = append(out, Camelize(w)) - } else { - out = append(out, w) - } - } - return strings.Join(out, " ") -} - -// ToJSONName camelcases a name which can be underscored or pascal cased -func ToJSONName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for i, w := range in { - if i == 0 { - out = append(out, lower(w)) - continue - } - out = append(out, Camelize(w)) - } - return strings.Join(out, "") -} - -// ToVarName camelcases a name which can be underscored or pascal cased -func ToVarName(name string) string { - res := ToGoName(name) - if isInitialism(res) { - return lower(res) - } - if len(res) <= 1 { - return lower(res) - } - return lower(res[:1]) + res[1:] -} - -// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes -func ToGoName(name string) string { - in := split(name) - out := make([]string, 0, len(in)) - - for _, w := range in { - uw := upper(w) - mod := int(math.Min(float64(len(uw)), 2)) - if !isInitialism(uw) && !isInitialism(uw[:len(uw)-mod]) { - uw = Camelize(w) - } - out = append(out, uw) - } - - result := strings.Join(out, "") - if len(result) > 0 { - if !unicode.IsUpper([]rune(result)[0]) { - result = "X" + result - } - } - return result -} - -// ContainsStrings searches a slice of strings for a case-sensitive match -func ContainsStrings(coll []string, item string) bool { - for _, 
a := range coll { - if a == item { - return true - } - } - return false -} - -// ContainsStringsCI searches a slice of strings for a case-insensitive match -func ContainsStringsCI(coll []string, item string) bool { - for _, a := range coll { - if strings.EqualFold(a, item) { - return true - } - } - return false -} - -type zeroable interface { - IsZero() bool -} - -// IsZero returns true when the value passed into the function is a zero value. -// This allows for safer checking of interface values. -func IsZero(data interface{}) bool { - // check for things that have an IsZero method instead - if vv, ok := data.(zeroable); ok { - return vv.IsZero() - } - // continue with slightly more complex reflection - v := reflect.ValueOf(data) - switch v.Kind() { - case reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - case reflect.Struct, reflect.Array: - return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface()) - case reflect.Invalid: - return true - } - return false -} - -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - //commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - -// CommandLineOptionsGroup represents a group of user-defined command line options -type CommandLineOptionsGroup struct { - ShortDescription string - LongDescription string - Options interface{} -} diff --git a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/yaml.go b/cmd/bpa-operator/vendor/github.com/go-openapi/swag/yaml.go deleted file mode 100644 index 435e294..0000000 --- a/cmd/bpa-operator/vendor/github.com/go-openapi/swag/yaml.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
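The util.go deleted above drives go-swagger's name mangling (initialism-aware splitting plus Go, JSON, and file-name casing). A small sketch of the behaviour implied by that code, assuming the public module and hypothetical input names:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("bare_metal_host")) // BareMetalHost
	fmt.Println(swag.ToJSONName("BareMetalHost")) // bareMetalHost
	fmt.Println(swag.ToFileName("BareMetalHost")) // bare_metal_host
	fmt.Println(swag.ToGoName("id"))              // ID (a configured initialism stays uppercased)
}
```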
- -package swag - -import ( - "encoding/json" - "fmt" - "path/filepath" - "strconv" - - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" -) - -// YAMLMatcher matches yaml -func YAMLMatcher(path string) bool { - ext := filepath.Ext(path) - return ext == ".yaml" || ext == ".yml" -} - -// YAMLToJSON converts YAML unmarshaled data into json compatible data -func YAMLToJSON(data interface{}) (json.RawMessage, error) { - jm, err := transformData(data) - if err != nil { - return nil, err - } - b, err := WriteJSON(jm) - return json.RawMessage(b), err -} - -// BytesToYAMLDoc converts a byte slice into a YAML document -func BytesToYAMLDoc(data []byte) (interface{}, error) { - var canary map[interface{}]interface{} // validate this is an object and not a different type - if err := yaml.Unmarshal(data, &canary); err != nil { - return nil, err - } - - var document yaml.MapSlice // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err - } - return document, nil -} - -// JSONMapSlice represent a JSON object, with the order of keys maintained -type JSONMapSlice []JSONMapItem - -// MarshalJSON renders a JSONMapSlice as JSON -func (s JSONMapSlice) MarshalJSON() ([]byte, error) { - w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} - s.MarshalEasyJSON(w) - return w.BuildBytes() -} - -// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON -func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) { - w.RawByte('{') - - ln := len(s) - last := ln - 1 - for i := 0; i < ln; i++ { - s[i].MarshalEasyJSON(w) - if i != last { // last item - w.RawByte(',') - } - } - - w.RawByte('}') -} - -// UnmarshalJSON makes a JSONMapSlice from JSON -func (s *JSONMapSlice) UnmarshalJSON(data []byte) error { - l := jlexer.Lexer{Data: data} - s.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON -func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { - if in.IsNull() { - in.Skip() - return - } - - var result JSONMapSlice - in.Delim('{') - for !in.IsDelim('}') { - var mi JSONMapItem - mi.UnmarshalEasyJSON(in) - result = append(result, mi) - } - *s = result -} - -// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice -type JSONMapItem struct { - Key string - Value interface{} -} - -// MarshalJSON renders a JSONMapItem as JSON -func (s JSONMapItem) MarshalJSON() ([]byte, error) { - w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty} - s.MarshalEasyJSON(w) - return w.BuildBytes() -} - -// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON -func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) { - w.String(s.Key) - w.RawByte(':') - w.Raw(WriteJSON(s.Value)) -} - -// UnmarshalJSON makes a JSONMapItem from JSON -func (s *JSONMapItem) UnmarshalJSON(data []byte) error { - l := jlexer.Lexer{Data: data} - s.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON -func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) { - key := in.UnsafeString() - in.WantColon() - value := in.Interface() - in.WantComma() - s.Key = key - s.Value = value -} - -func transformData(input interface{}) (out interface{}, err error) { - switch in := input.(type) { - case yaml.MapSlice: - - o := make(JSONMapSlice, len(in)) - for i, mi := range in { - var nmi JSONMapItem - switch k := mi.Key.(type) { - case string: - nmi.Key 
= k - case int: - nmi.Key = strconv.Itoa(k) - default: - return nil, fmt.Errorf("types don't match expect map key string or int got: %T", mi.Key) - } - - v, ert := transformData(mi.Value) - if ert != nil { - return nil, ert - } - nmi.Value = v - o[i] = nmi - } - return o, nil - case map[interface{}]interface{}: - o := make(JSONMapSlice, 0, len(in)) - for ke, va := range in { - var nmi JSONMapItem - switch k := ke.(type) { - case string: - nmi.Key = k - case int: - nmi.Key = strconv.Itoa(k) - default: - return nil, fmt.Errorf("types don't match expect map key string or int got: %T", ke) - } - - v, ert := transformData(va) - if ert != nil { - return nil, ert - } - nmi.Value = v - o = append(o, nmi) - } - return o, nil - case []interface{}: - len1 := len(in) - o := make([]interface{}, len1) - for i := 0; i < len1; i++ { - o[i], err = transformData(in[i]) - if err != nil { - return nil, err - } - } - return o, nil - } - return input, nil -} - -// YAMLDoc loads a yaml document from either http or a file and converts it to json -func YAMLDoc(path string) (json.RawMessage, error) { - yamlDoc, err := YAMLData(path) - if err != nil { - return nil, err - } - - data, err := YAMLToJSON(yamlDoc) - if err != nil { - return nil, err - } - - return data, nil -} - -// YAMLData loads a yaml document from either http or a file -func YAMLData(path string) (interface{}, error) { - data, err := LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - - return BytesToYAMLDoc(data) -} diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.env b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.env deleted file mode 100644 index 33eeb3b..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.env +++ /dev/null @@ -1,5 +0,0 @@ -# This is a comment -# We can use equal or colon notation -DIR: root -FLAVOUR: none -INSIDE_FOLDER=false \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.gitignore b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.gitignore deleted file mode 100644 index 3689718..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.gitignore +++ /dev/null @@ -1,29 +0,0 @@ -*.log -.DS_Store -doc -tmp -pkg -*.gem -*.pid -coverage -coverage.data -build/* -*.pbxuser -*.mode1v3 -.svn -profile -.console_history -.sass-cache/* -.rake_tasks~ -*.log.lck -solr/ -.jhw-cache/ -jhw.* -*.sublime* -node_modules/ -dist/ -generated/ -.vendor/ -bin/* -gin-bin -.idea/ diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.gometalinter.json b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.gometalinter.json deleted file mode 100644 index e4f65a3..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.gometalinter.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "Enable": ["vet", "golint", "goimports", "deadcode", "gotype", "ineffassign", "misspell", "nakedret", "unconvert", "megacheck", "varcheck"] -} diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.travis.yml b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.travis.yml deleted file mode 100644 index 1fb041a..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go - -sudo: false - -matrix: - include: - - os: linux - go: "1.9.x" - - os: windows - go: "1.9.x" - - os: linux - go: "1.10.x" - - os: windows - go: "1.10.x" - - os: linux - go: "1.11.x" - env: - - GO111MODULE=off - - os: windows - go: "1.11.x" - env: - - GO111MODULE=off - - os: linux - go: "1.11.x" - env: - - GO111MODULE=on - - os: windows 
- go: "1.11.x" - env: - - GO111MODULE=on - -install: false - -script: - - go get -v -t ./... - - go test -v -timeout=5s -race ./... diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/LICENSE.txt b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/LICENSE.txt deleted file mode 100644 index 123ddc0..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/LICENSE.txt +++ /dev/null @@ -1,8 +0,0 @@ -The MIT License (MIT) -Copyright (c) 2018 Mark Bates - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/Makefile b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/Makefile deleted file mode 100644 index 46aece8..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/Makefile +++ /dev/null @@ -1,46 +0,0 @@ -TAGS ?= "sqlite" -GO_BIN ?= go - -install: - packr2 - $(GO_BIN) install -v . - -deps: - $(GO_BIN) get github.com/gobuffalo/release - $(GO_BIN) get github.com/gobuffalo/packr/v2/packr2 - $(GO_BIN) get -tags ${TAGS} -t ./... -ifeq ($(GO111MODULE),on) - $(GO_BIN) mod tidy -endif - -build: - packr2 - $(GO_BIN) build -v . - -test: - packr2 - $(GO_BIN) test -tags ${TAGS} ./... - -ci-test: - $(GO_BIN) test -tags ${TAGS} -race ./... - -lint: - gometalinter --vendor ./... --deadline=1m --skip=internal - -update: - $(GO_BIN) get -u -tags ${TAGS} -ifeq ($(GO111MODULE),on) - $(GO_BIN) mod tidy -endif - packr2 - make test - make install -ifeq ($(GO111MODULE),on) - $(GO_BIN) mod tidy -endif - -release-test: - $(GO_BIN) test -tags ${TAGS} -race ./... - -release: - release -y -f version.go diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/README.md b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/README.md deleted file mode 100644 index f54462a..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# envy -[![Build Status](https://travis-ci.org/gobuffalo/envy.svg?branch=master)](https://travis-ci.org/gobuffalo/envy) - -Envy makes working with ENV variables in Go trivial. - -* Get ENV variables with default values. -* Set ENV variables safely without affecting the underlying system. -* Temporarily change ENV vars; useful for testing. -* Map all of the key/values in the ENV. -* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/)) -* More! 
- -## Installation - -```text -$ go get -u github.com/gobuffalo/envy -``` - -## Usage - -```go -func Test_Get(t *testing.T) { - r := require.New(t) - r.NotZero(os.Getenv("GOPATH")) - r.Equal(os.Getenv("GOPATH"), envy.Get("GOPATH", "foo")) - r.Equal("bar", envy.Get("IDONTEXIST", "bar")) -} - -func Test_MustGet(t *testing.T) { - r := require.New(t) - r.NotZero(os.Getenv("GOPATH")) - v, err := envy.MustGet("GOPATH") - r.NoError(err) - r.Equal(os.Getenv("GOPATH"), v) - - _, err = envy.MustGet("IDONTEXIST") - r.Error(err) -} - -func Test_Set(t *testing.T) { - r := require.New(t) - _, err := envy.MustGet("FOO") - r.Error(err) - - envy.Set("FOO", "foo") - r.Equal("foo", envy.Get("FOO", "bar")) -} - -func Test_Temp(t *testing.T) { - r := require.New(t) - - _, err := envy.MustGet("BAR") - r.Error(err) - - envy.Temp(func() { - envy.Set("BAR", "foo") - r.Equal("foo", envy.Get("BAR", "bar")) - _, err = envy.MustGet("BAR") - r.NoError(err) - }) - - _, err = envy.MustGet("BAR") - r.Error(err) -} -``` -## .env files support - -Envy now supports loading `.env` files by using the [godotenv library](https://github.com/joho/godotenv/). -That means one can use and define multiple `.env` files which will be loaded on-demand. By default, no env files will be loaded. To load one or more, you need to call the `envy.Load` function in one of the following ways: - -```go -envy.Load() // 1 - -envy.Load("MY_ENV_FILE") // 2 - -envy.Load(".env", ".env.prod") // 3 - -envy.Load(".env", "NON_EXISTING_FILE") // 4 - -// 5 -envy.Load(".env") -envy.Load("NON_EXISTING_FILE") - -// 6 -envy.Load(".env", "NON_EXISTING_FILE", ".env.prod") -``` - -1. Will load the default `.env` file -2. Will load the file `MY_ENV_FILE`, **but not** `.env` -3. Will load the file `.env`, and after that will load the `.env.prod` file. If any variable is redefined in `. env.prod` it will be overwritten (will contain the `env.prod` value) -4. Will load the `.env` file and return an error as the second file does not exist. The values in `.env` will be loaded and available. -5. Same as 4 -6. Will load the `.env` file and return an error as the second file does not exist. The values in `.env` will be loaded and available, **but the ones in** `.env.prod` **won't**. diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/SHOULDERS.md b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/SHOULDERS.md deleted file mode 100644 index 6c9cd7b..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/SHOULDERS.md +++ /dev/null @@ -1,16 +0,0 @@ -# `github.com/gobuffalo/envy` Stands on the Shoulders of Giants - -`github.com/gobuffalo/envy` does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants this project would not be possible. Please make sure to check them out and thank them for all of their hard work. 
- -Thank you to the following **GIANTS**: - - -* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy) - -* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv) - -* [github.com/rogpeppe/go-internal/modfile](https://godoc.org/github.com/rogpeppe/go-internal/modfile) - -* [github.com/rogpeppe/go-internal/module](https://godoc.org/github.com/rogpeppe/go-internal/module) - -* [github.com/rogpeppe/go-internal/semver](https://godoc.org/github.com/rogpeppe/go-internal/semver) diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/envy.go b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/envy.go deleted file mode 100644 index d3995f4..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/envy.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -package envy makes working with ENV variables in Go trivial. - -* Get ENV variables with default values. -* Set ENV variables safely without affecting the underlying system. -* Temporarily change ENV vars; useful for testing. -* Map all of the key/values in the ENV. -* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/)) -* More! -*/ -package envy - -import ( - "errors" - "flag" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "sync" - - "github.com/joho/godotenv" - "github.com/rogpeppe/go-internal/modfile" -) - -var gil = &sync.RWMutex{} -var env = map[string]string{} - -// GO111MODULE is ENV for turning mods on/off -const GO111MODULE = "GO111MODULE" - -func init() { - Load() - loadEnv() -} - -// Load the ENV variables to the env map -func loadEnv() { - gil.Lock() - defer gil.Unlock() - - if os.Getenv("GO_ENV") == "" { - // if the flag "test.v" is *defined*, we're running as a unit test. Note that we don't care - // about v.Value (verbose test mode); we just want to know if the test environment has defined - // it. It's also possible that the flags are not yet fully parsed (i.e. flag.Parsed() == false), - // so we could not depend on v.Value anyway. - // - if v := flag.Lookup("test.v"); v != nil { - env["GO_ENV"] = "test" - } - } - - // set the GOPATH if using >= 1.8 and the GOPATH isn't set - if os.Getenv("GOPATH") == "" { - out, err := exec.Command("go", "env", "GOPATH").Output() - if err == nil { - gp := strings.TrimSpace(string(out)) - os.Setenv("GOPATH", gp) - } - } - - for _, e := range os.Environ() { - pair := strings.Split(e, "=") - env[pair[0]] = os.Getenv(pair[0]) - } -} - -func Mods() bool { - return Get(GO111MODULE, "off") == "on" -} - -// Reload the ENV variables. Useful if -// an external ENV manager has been used -func Reload() { - env = map[string]string{} - loadEnv() -} - -// Load .env files. Files will be loaded in the same order that are received. -// Redefined vars will override previously existing values. -// IE: envy.Load(".env", "test_env/.env") will result in DIR=test_env -// If no arg passed, it will try to load a .env file. -func Load(files ...string) error { - - // If no files received, load the default one - if len(files) == 0 { - err := godotenv.Overload() - if err == nil { - Reload() - } - return err - } - - // We received a list of files - for _, file := range files { - - // Check if it exists or we can access - if _, err := os.Stat(file); err != nil { - // It does not exist or we can not access. - // Return and stop loading - return err - } - - // It exists and we have permission. 
Load it - if err := godotenv.Overload(file); err != nil { - return err - } - - // Reload the env so all new changes are noticed - Reload() - - } - return nil -} - -// Get a value from the ENV. If it doesn't exist the -// default value will be returned. -func Get(key string, value string) string { - gil.RLock() - defer gil.RUnlock() - if v, ok := env[key]; ok { - return v - } - return value -} - -// Get a value from the ENV. If it doesn't exist -// an error will be returned -func MustGet(key string) (string, error) { - gil.RLock() - defer gil.RUnlock() - if v, ok := env[key]; ok { - return v, nil - } - return "", fmt.Errorf("could not find ENV var with %s", key) -} - -// Set a value into the ENV. This is NOT permanent. It will -// only affect values accessed through envy. -func Set(key string, value string) { - gil.Lock() - defer gil.Unlock() - env[key] = value -} - -// MustSet the value into the underlying ENV, as well as envy. -// This may return an error if there is a problem setting the -// underlying ENV value. -func MustSet(key string, value string) error { - gil.Lock() - defer gil.Unlock() - err := os.Setenv(key, value) - if err != nil { - return err - } - env[key] = value - return nil -} - -// Map all of the keys/values set in envy. -func Map() map[string]string { - gil.RLock() - defer gil.RUnlock() - cp := map[string]string{} - for k, v := range env { - cp[k] = v - } - return env -} - -// Temp makes a copy of the values and allows operation on -// those values temporarily during the run of the function. -// At the end of the function run the copy is discarded and -// the original values are replaced. This is useful for testing. -// Warning: This function is NOT safe to use from a goroutine or -// from code which may access any Get or Set function from a goroutine -func Temp(f func()) { - oenv := env - env = map[string]string{} - for k, v := range oenv { - env[k] = v - } - defer func() { env = oenv }() - f() -} - -func GoPath() string { - return Get("GOPATH", "") -} - -func GoBin() string { - return Get("GO_BIN", "go") -} - -func InGoPath() bool { - pwd, _ := os.Getwd() - for _, p := range GoPaths() { - if strings.HasPrefix(pwd, p) { - return true - } - } - return false -} - -// GoPaths returns all possible GOPATHS that are set. -func GoPaths() []string { - gp := Get("GOPATH", "") - if runtime.GOOS == "windows" { - return strings.Split(gp, ";") // Windows uses a different separator - } - return strings.Split(gp, ":") -} - -func importPath(path string) string { - path = strings.TrimPrefix(path, "/private") - for _, gopath := range GoPaths() { - srcpath := filepath.Join(gopath, "src") - rel, err := filepath.Rel(srcpath, path) - if err == nil { - return filepath.ToSlash(rel) - } - } - - // fallback to trim - rel := strings.TrimPrefix(path, filepath.Join(GoPath(), "src")) - rel = strings.TrimPrefix(rel, string(filepath.Separator)) - return filepath.ToSlash(rel) -} - -// CurrentModule will attempt to return the module name from `go.mod` if -// modules are enabled. -// If modules are not enabled it will fallback to using CurrentPackage instead. 
-func CurrentModule() (string, error) { - if !Mods() { - return CurrentPackage(), nil - } - moddata, err := ioutil.ReadFile("go.mod") - if err != nil { - return "", errors.New("go.mod cannot be read or does not exist while go module is enabled") - } - packagePath := modfile.ModulePath(moddata) - if packagePath == "" { - return "", errors.New("go.mod is malformed") - } - return packagePath, nil -} - -// CurrentPackage attempts to figure out the current package name from the PWD -// Use CurrentModule for a more accurate package name. -func CurrentPackage() string { - if Mods() { - } - pwd, _ := os.Getwd() - return importPath(pwd) -} - -func Environ() []string { - gil.RLock() - defer gil.RUnlock() - var e []string - for k, v := range env { - e = append(e, fmt.Sprintf("%s=%s", k, v)) - } - return e -} diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/go.mod b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/go.mod deleted file mode 100644 index d951b7c..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/gobuffalo/envy - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/joho/godotenv v1.3.0 - github.com/rogpeppe/go-internal v1.1.0 - github.com/stretchr/testify v1.3.0 -) diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/go.sum b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/go.sum deleted file mode 100644 index f11ef4c..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/go.sum +++ /dev/null @@ -1,17 +0,0 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.1.0 h1:g0fH8RicVgNl+zVZDCDfbdWxAWoAEJyI7I3TZYXFiig= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= diff --git a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/version.go b/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/version.go deleted file mode 100644 index 0bff116..0000000 --- a/cmd/bpa-operator/vendor/github.com/gobuffalo/envy/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package envy - -const Version = "v1.6.15" diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/AUTHORS b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 
3d97fc7..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. - -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c2..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/LICENSE b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
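A small sketch of the module-detection helpers removed above (Mods, CurrentModule, CurrentPackage), assuming it is run from a directory that contains a go.mod:

```go
package main

import (
	"fmt"

	"github.com/gobuffalo/envy"
)

func main() {
	// Mods reports whether GO111MODULE=on (per the Mods helper above).
	fmt.Println("modules enabled:", envy.Mods())

	// CurrentModule reads the module path from go.mod when modules are on,
	// and falls back to CurrentPackage (derived from GOPATH and the working
	// directory) when they are off.
	mod, err := envy.CurrentModule()
	if err != nil {
		fmt.Println("no readable go.mod:", err)
		return
	}
	fmt.Println("module:", mod)
}
```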
- diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/Makefile b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f3..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/clone.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 2455248..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/decode.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. 
This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/deprecated.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/discard.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/duration.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c9..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e17..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/encode.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb17..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
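To make the wire-format helpers above concrete, a small self-contained sketch (written for illustration, not taken from the vendored package) that encodes a varint and a zigzag value following the same rules as EncodeVarint and EncodeZigzag64:

    package main

    import "fmt"

    // appendVarint appends x in base-128 varint form, least significant seven
    // bits first, setting the high bit on every byte except the last. This is
    // the same loop used by (*Buffer).EncodeVarint above.
    func appendVarint(buf []byte, x uint64) []byte {
        for x >= 1<<7 {
            buf = append(buf, byte(x&0x7f|0x80))
            x >>= 7
        }
        return append(buf, byte(x))
    }

    // zigzag folds signed values into unsigned ones so that small negative
    // numbers also get short varints, matching EncodeZigzag64.
    func zigzag(v int64) uint64 {
        return uint64(v<<1) ^ uint64(v>>63)
    }

    func main() {
        fmt.Printf("% x\n", appendVarint(nil, 300)) // prints: ac 02
        fmt.Println(zigzag(-1), zigzag(1))          // prints: 1 2
    }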
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/equal.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. 
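The equality rules listed above are easiest to read next to a concrete call. A short sketch, assuming a generated message such as the pb.Test type shown later in this package's documentation (the import path and type here are hypothetical; any generated proto struct would behave the same way):

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/proto"
        pb "example.com/demo" // hypothetical package containing the generated Test type
    )

    func main() {
        a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
        b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
        fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal

        b.Reps = append(b.Reps, 4)     // repeated fields must match element-wise
        fmt.Println(proto.Equal(a, b)) // false
    }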
- -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/extensions.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 686bd2a..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,604 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. 
-func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. 
- desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. 
-// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. 
-func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. 
-func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 53ebd8c..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,368 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
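As a usage-level view of the extension API above, a brief sketch; pb.Base and pb.E_Count stand in for a generated extendable message and its registered optional int32 extension descriptor (both names are hypothetical):

    package main

    import (
        "fmt"
        "log"

        "github.com/gogo/protobuf/proto"
        pb "example.com/demo" // hypothetical package with the generated types
    )

    func main() {
        msg := &pb.Base{}
        fmt.Println(proto.HasExtension(msg, pb.E_Count)) // false: nothing set yet

        // SetExtension expects the ExtensionType's Go representation,
        // here *int32, so the proto.Int32 helper is used.
        if err := proto.SetExtension(msg, pb.E_Count, proto.Int32(7)); err != nil {
            log.Fatal(err)
        }

        v, err := proto.GetExtension(msg, pb.E_Count)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(*v.(*int32)) // 7

        proto.ClearExtension(msg, pb.E_Count) // removes the field again
    }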
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) 
{ - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) - m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, 
fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/lib.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index d17f802..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,967 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. 
- - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - 
data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. 
-func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const GoGoProtoPackageIsVersion1 = true - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/lib_gogo.go deleted file mode 100644 index b3aa391..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ /dev/null @@ -1,50 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/message_set.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a756..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. 
We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) 
- - m[id] = Extension{enc: b} - } - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad90..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. 
-func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c2..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. 
-// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/properties.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index c9e5fa0..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,599 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index 9b1538d..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3006 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_merge.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index f520106..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,657 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index bb2622f..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2245 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. - err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. 
- z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." 
+ f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if fn.IsValid() && len(oneofFields) > 0 { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - } - } - - // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? 
- for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
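For orientation: every per-field unmarshaler deleted below follows the same small pattern that typeUnmarshaler dispatches to — check the wire type, decode one value, store it through the field pointer, and return the remaining bytes. The following is a minimal standalone sketch of that pattern, not part of the removed vendored file; names such as unmarshalInt64Value and decodeVarint are reproduced here only as plain Go free functions, without the package's pointer/field machinery.

// Standalone sketch of the per-field unmarshaler pattern (hypothetical,
// simplified; the vendored code stores through an unsafe field pointer).
package main

import (
	"errors"
	"fmt"
	"io"
)

const wireVarint = 0

var errBadWireType = errors.New("bad wire type")

// decodeVarint mirrors the helper the deleted code relies on: it returns the
// decoded value and the number of bytes consumed, or (0, 0) on error.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		x |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
		if i == 9 {
			break // a varint is at most 10 bytes
		}
	}
	return 0, 0
}

// unmarshalInt64Value shows the shared shape: verify the wire type,
// decode one varint, store it, and hand back the unconsumed bytes.
func unmarshalInt64Value(b []byte, dst *int64, wire int) ([]byte, error) {
	if wire != wireVarint {
		return b, errBadWireType
	}
	x, n := decodeVarint(b)
	if n == 0 {
		return nil, io.ErrUnexpectedEOF
	}
	*dst = int64(x)
	return b[n:], nil
}

func main() {
	var v int64
	rest, err := unmarshalInt64Value([]byte{0xac, 0x02, 0xff}, &v, wireVarint)
	fmt.Println(v, rest, err) // 300 [255] <nil>
}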
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7a..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. 
- // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := ×tamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, 
errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := 
f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 0407ba8..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,928 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). 
-// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } 
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text_parser.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index 1ce0be2..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - if props.StdTime { - fv := v - p.back() - props.StdTime = false - tproto := &timestamp{} - err := p.readAny(reflect.ValueOf(tproto).Elem(), props) - props.StdTime = true - if err != nil { - return err - } - tim, err := timestampFromProto(tproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ts := fv.Interface().([]*time.Time) - ts = append(ts, &tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } else { - ts := fv.Interface().([]time.Time) - ts = append(ts, tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&tim)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&tim))) - } - return nil - } - if props.StdDuration { - fv := v - p.back() - props.StdDuration = false - dproto := &duration{} - err := p.readAny(reflect.ValueOf(dproto).Elem(), props) - props.StdDuration = true - if err != nil { - return err - } - dur, err := durationFromProto(dproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ds := fv.Interface().([]*time.Duration) - ds = append(ds, &dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } else { - ds := fv.Interface().([]time.Duration) - ds = append(ds, dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&dur)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&dur))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. 
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/timestamp.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f65..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func timestampFromProto(ts *timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func timestampProto(t time.Time) (*timestamp, error) { - seconds := t.Unix() - nanos := int32(t.Sub(time.Unix(seconds, 0))) - ts := &timestamp{ - Seconds: seconds, - Nanos: nanos, - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go deleted file mode 100644 index 38439fa..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - -type timestamp struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *timestamp) Reset() { *m = timestamp{} } -func (*timestamp) ProtoMessage() {} -func (*timestamp) String() string { return "timestamp" } - -func init() { - RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/wrappers.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/wrappers.go deleted file mode 100644 index b175d1b..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/wrappers.go +++ /dev/null @@ -1,1888 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -type float64Value struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float64Value) Reset() { *m = float64Value{} } -func (*float64Value) ProtoMessage() {} -func (*float64Value) String() string { return "float64" } - -type float32Value struct { - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *float32Value) Reset() { *m = float32Value{} } -func (*float32Value) ProtoMessage() {} -func (*float32Value) String() string { return "float32" } - -type int64Value struct { - Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int64Value) Reset() { *m = int64Value{} } -func (*int64Value) ProtoMessage() {} -func (*int64Value) String() string { return "int64" } - -type uint64Value struct { - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint64Value) Reset() { *m = uint64Value{} } -func (*uint64Value) ProtoMessage() {} -func (*uint64Value) String() string { return "uint64" } - -type int32Value struct { - Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *int32Value) Reset() { *m = int32Value{} } -func (*int32Value) ProtoMessage() {} -func (*int32Value) String() string { return "int32" } - -type uint32Value struct { - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *uint32Value) Reset() { *m = uint32Value{} } -func (*uint32Value) ProtoMessage() {} -func (*uint32Value) String() string { return "uint32" } - -type boolValue struct { - Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *boolValue) Reset() { *m = boolValue{} } -func (*boolValue) ProtoMessage() {} -func (*boolValue) String() string { return "bool" } - -type stringValue struct { - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *stringValue) Reset() { *m = stringValue{} } -func (*stringValue) ProtoMessage() {} -func (*stringValue) String() string { return "string" } - -type bytesValue struct { - Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *bytesValue) Reset() { *m = bytesValue{} } -func (*bytesValue) ProtoMessage() {} -func (*bytesValue) String() string { return "[]byte" } - -func init() { - RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") - RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") - RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") - RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") - RegisterType((*int32Value)(nil), 
"gogo.protobuf.proto.Int32Value") - RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") - RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") - RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") - RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue") -} diff --git a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/cmd/bpa-operator/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go deleted file mode 100644 index ceadde6..0000000 --- a/cmd/bpa-operator/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go +++ /dev/null @@ -1,101 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package sortkeys - -import ( - "sort" -) - -func Strings(l []string) { - sort.Strings(l) -} - -func Float64s(l []float64) { - sort.Float64s(l) -} - -func Float32s(l []float32) { - sort.Sort(Float32Slice(l)) -} - -func Int64s(l []int64) { - sort.Sort(Int64Slice(l)) -} - -func Int32s(l []int32) { - sort.Sort(Int32Slice(l)) -} - -func Uint64s(l []uint64) { - sort.Sort(Uint64Slice(l)) -} - -func Uint32s(l []uint32) { - sort.Sort(Uint32Slice(l)) -} - -func Bools(l []bool) { - sort.Sort(BoolSlice(l)) -} - -type BoolSlice []bool - -func (p BoolSlice) Len() int { return len(p) } -func (p BoolSlice) Less(i, j int) bool { return p[j] } -func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int32Slice []int32 - -func (p Int32Slice) Len() int { return len(p) } -func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint32Slice []uint32 - -func (p Uint32Slice) Len() int { return len(p) } -func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Float32Slice []float32 - -func (p Float32Slice) Len() int { return len(p) } -func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/cmd/bpa-operator/vendor/github.com/golang/groupcache/LICENSE b/cmd/bpa-operator/vendor/github.com/golang/groupcache/LICENSE deleted file mode 100644 index 37ec93a..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/cmd/bpa-operator/vendor/github.com/golang/groupcache/lru/lru.go b/cmd/bpa-operator/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index eac1c76..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. 
-type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specifies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} - -// Clear purges all stored items from the cache. -func (c *Cache) Clear() { - if c.OnEvicted != nil { - for _, e := range c.cache { - kv := e.Value.(*entry) - c.OnEvicted(kv.key, kv.value) - } - } - c.ll = nil - c.cache = nil -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/AUTHORS b/cmd/bpa-operator/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/CONTRIBUTORS b/cmd/bpa-operator/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/LICENSE b/cmd/bpa-operator/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f64693..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go deleted file mode 100644 index ada2b78..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ /dev/null @@ -1,1271 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2015 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. -It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. - -This package produces a different output than the standard "encoding/json" package, -which does not operate correctly on protocol buffers. -*/ -package jsonpb - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - - stpb "github.com/golang/protobuf/ptypes/struct" -) - -const secondInNanos = int64(time.Second / time.Nanosecond) - -// Marshaler is a configurable object for converting between -// protocol buffer objects and a JSON representation for them. -type Marshaler struct { - // Whether to render enum values as integers, as opposed to string values. - EnumsAsInts bool - - // Whether to render fields with zero values. - EmitDefaults bool - - // A string to indent each level by. The presence of this field will - // also cause a space to appear between the field separator and - // value, and for newlines to be appear between fields and array - // elements. - Indent string - - // Whether to use the original (.proto) name for fields. - OrigName bool - - // A custom URL resolver to use when marshaling Any messages to JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// AnyResolver takes a type URL, present in an Any message, and resolves it into -// an instance of the associated message. -type AnyResolver interface { - Resolve(typeUrl string) (proto.Message, error) -} - -func defaultResolveAny(typeUrl string) (proto.Message, error) { - // Only the part of typeUrl after the last slash is relevant. - mname := typeUrl - if slash := strings.LastIndex(mname, "/"); slash >= 0 { - mname = mname[slash+1:] - } - mt := proto.MessageType(mname) - if mt == nil { - return nil, fmt.Errorf("unknown message type %q", mname) - } - return reflect.New(mt.Elem()).Interface().(proto.Message), nil -} - -// JSONPBMarshaler is implemented by protobuf messages that customize the -// way they are marshaled to JSON. Messages that implement this should -// also implement JSONPBUnmarshaler so that the custom format can be -// parsed. -// -// The JSON marshaling must follow the proto to JSON specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -type JSONPBMarshaler interface { - MarshalJSONPB(*Marshaler) ([]byte, error) -} - -// JSONPBUnmarshaler is implemented by protobuf messages that customize -// the way they are unmarshaled from JSON. Messages that implement this -// should also implement JSONPBMarshaler so that the custom format can be -// produced. 
-// -// The JSON unmarshaling must follow the JSON to proto specification: -// https://developers.google.com/protocol-buffers/docs/proto3#json -type JSONPBUnmarshaler interface { - UnmarshalJSONPB(*Unmarshaler, []byte) error -} - -// Marshal marshals a protocol buffer into JSON. -func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { - v := reflect.ValueOf(pb) - if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { - return errors.New("Marshal called with nil") - } - // Check for unset required fields first. - if err := checkRequiredFields(pb); err != nil { - return err - } - writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "", "") -} - -// MarshalToString converts a protocol buffer object to JSON string. -func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { - var buf bytes.Buffer - if err := m.Marshal(&buf, pb); err != nil { - return "", err - } - return buf.String(), nil -} - -type int32Slice []int32 - -var nonFinite = map[string]float64{ - `"NaN"`: math.NaN(), - `"Infinity"`: math.Inf(1), - `"-Infinity"`: math.Inf(-1), -} - -// For sorting extensions ids to ensure stable output. -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -type wkt interface { - XXX_WellKnownType() string -} - -// marshalObject writes a struct to the Writer. -func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { - if jsm, ok := v.(JSONPBMarshaler); ok { - b, err := jsm.MarshalJSONPB(m) - if err != nil { - return err - } - if typeURL != "" { - // we are marshaling this object to an Any type - var js map[string]*json.RawMessage - if err = json.Unmarshal(b, &js); err != nil { - return fmt.Errorf("type %T produced invalid JSON: %v", v, err) - } - turl, err := json.Marshal(typeURL) - if err != nil { - return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) - } - js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { - return err - } - } - - out.write(string(b)) - return out.err - } - - s := reflect.ValueOf(v).Elem() - - // Handle well-known types. - if wkt, ok := v.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - // "Wrappers use the same representation in JSON - // as the wrapped primitive type, ..." - sprop := proto.GetProperties(s.Type()) - return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) - case "Any": - // Any is a bit more involved. - return m.marshalAny(out, v, indent) - case "Duration": - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." - s, ns := s.Field(0).Int(), s.Field(1).Int() - if ns <= -secondInNanos || ns >= secondInNanos { - return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) - } - if (s > 0 && ns < 0) || (s < 0 && ns > 0) { - return errors.New("signs of seconds and nanos do not match") - } - if s < 0 { - ns = -ns - } - x := fmt.Sprintf("%d.%09d", s, ns) - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - out.write(`"`) - out.write(x) - out.write(`s"`) - return out.err - case "Struct", "ListValue": - // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. - // TODO: pass the correct Properties if needed. 
- return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) - case "Timestamp": - // "RFC 3339, where generated output will always be Z-normalized - // and uses 0, 3, 6 or 9 fractional digits." - s, ns := s.Field(0).Int(), s.Field(1).Int() - if ns < 0 || ns >= secondInNanos { - return fmt.Errorf("ns out of range [0, %v)", secondInNanos) - } - t := time.Unix(s, ns).UTC() - // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). - x := t.Format("2006-01-02T15:04:05.000000000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, "000") - x = strings.TrimSuffix(x, ".000") - out.write(`"`) - out.write(x) - out.write(`Z"`) - return out.err - case "Value": - // Value has a single oneof. - kind := s.Field(0) - if kind.IsNil() { - // "absence of any variant indicates an error" - return errors.New("nil Value") - } - // oneof -> *T -> T -> T.F - x := kind.Elem().Elem().Field(0) - // TODO: pass the correct Properties if needed. - return m.marshalValue(out, &proto.Properties{}, x, indent) - } - } - - out.write("{") - if m.Indent != "" { - out.write("\n") - } - - firstField := true - - if typeURL != "" { - if err := m.marshalTypeURL(out, indent, typeURL); err != nil { - return err - } - firstField = false - } - - for i := 0; i < s.NumField(); i++ { - value := s.Field(i) - valueField := s.Type().Field(i) - if strings.HasPrefix(valueField.Name, "XXX_") { - continue - } - - // IsNil will panic on most value kinds. - switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface: - if value.IsNil() { - continue - } - } - - if !m.EmitDefaults { - switch value.Kind() { - case reflect.Bool: - if !value.Bool() { - continue - } - case reflect.Int32, reflect.Int64: - if value.Int() == 0 { - continue - } - case reflect.Uint32, reflect.Uint64: - if value.Uint() == 0 { - continue - } - case reflect.Float32, reflect.Float64: - if value.Float() == 0 { - continue - } - case reflect.String: - if value.Len() == 0 { - continue - } - case reflect.Map, reflect.Ptr, reflect.Slice: - if value.IsNil() { - continue - } - } - } - - // Oneof fields need special handling. - if valueField.Tag.Get("protobuf_oneof") != "" { - // value is an interface containing &T{real_value}. - sv := value.Elem().Elem() // interface -> *T -> T - value = sv.Field(0) - valueField = sv.Type().Field(0) - } - prop := jsonProperties(valueField, m.OrigName) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, prop, value, indent); err != nil { - return err - } - firstField = false - } - - // Handle proto2 extensions. - if ep, ok := v.(proto.Message); ok { - extensions := proto.RegisteredExtensions(v) - // Sort extensions for stable output. 
- ids := make([]int32, 0, len(extensions)) - for id, desc := range extensions { - if !proto.HasExtension(ep, desc) { - continue - } - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - for _, id := range ids { - desc := extensions[id] - if desc == nil { - // unknown extension - continue - } - ext, extErr := proto.GetExtension(ep, desc) - if extErr != nil { - return extErr - } - value := reflect.ValueOf(ext) - var prop proto.Properties - prop.Parse(desc.Tag) - prop.JSONName = fmt.Sprintf("[%s]", desc.Name) - if !firstField { - m.writeSep(out) - } - if err := m.marshalField(out, &prop, value, indent); err != nil { - return err - } - firstField = false - } - - } - - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err -} - -func (m *Marshaler) writeSep(out *errWriter) { - if m.Indent != "" { - out.write(",\n") - } else { - out.write(",") - } -} - -func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { - // "If the Any contains a value that has a special JSON mapping, - // it will be converted as follows: {"@type": xxx, "value": yyy}. - // Otherwise, the value will be converted into a JSON object, - // and the "@type" field will be inserted to indicate the actual data type." - v := reflect.ValueOf(any).Elem() - turl := v.Field(0).String() - val := v.Field(1).Bytes() - - var msg proto.Message - var err error - if m.AnyResolver != nil { - msg, err = m.AnyResolver.Resolve(turl) - } else { - msg, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if err := proto.Unmarshal(val, msg); err != nil { - return err - } - - if _, ok := msg.(wkt); ok { - out.write("{") - if m.Indent != "" { - out.write("\n") - } - if err := m.marshalTypeURL(out, indent, turl); err != nil { - return err - } - m.writeSep(out) - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - out.write(`"value": `) - } else { - out.write(`"value":`) - } - if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { - return err - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - } - out.write("}") - return out.err - } - - return m.marshalObject(out, msg, indent, turl) -} - -func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"@type":`) - if m.Indent != "" { - out.write(" ") - } - b, err := json.Marshal(typeURL) - if err != nil { - return err - } - out.write(string(b)) - return out.err -} - -// marshalField writes field description and value to the Writer. -func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - if m.Indent != "" { - out.write(indent) - out.write(m.Indent) - } - out.write(`"`) - out.write(prop.JSONName) - out.write(`":`) - if m.Indent != "" { - out.write(" ") - } - if err := m.marshalValue(out, prop, v, indent); err != nil { - return err - } - return nil -} - -// marshalValue writes the value to the Writer. -func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { - var err error - v = reflect.Indirect(v) - - // Handle nil pointer - if v.Kind() == reflect.Invalid { - out.write("null") - return out.err - } - - // Handle repeated elements. 
- if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - out.write("[") - comma := "" - for i := 0; i < v.Len(); i++ { - sliceVal := v.Index(i) - out.write(comma) - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { - return err - } - comma = "," - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write("]") - return out.err - } - - // Handle well-known types. - // Most are handled up in marshalObject (because 99% are messages). - if wkt, ok := v.Interface().(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "NullValue": - out.write("null") - return out.err - } - } - - // Handle enumerations. - if !m.EnumsAsInts && prop.Enum != "" { - // Unknown enum values will are stringified by the proto library as their - // value. Such values should _not_ be quoted or they will be interpreted - // as an enum string instead of their value. - enumStr := v.Interface().(fmt.Stringer).String() - var valStr string - if v.Kind() == reflect.Ptr { - valStr = strconv.Itoa(int(v.Elem().Int())) - } else { - valStr = strconv.Itoa(int(v.Int())) - } - isKnownEnum := enumStr != valStr - if isKnownEnum { - out.write(`"`) - } - out.write(enumStr) - if isKnownEnum { - out.write(`"`) - } - return out.err - } - - // Handle nested messages. - if v.Kind() == reflect.Struct { - return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") - } - - // Handle maps. - // Since Go randomizes map iteration, we sort keys for stable output. - if v.Kind() == reflect.Map { - out.write(`{`) - keys := v.MapKeys() - sort.Sort(mapKeys(keys)) - for i, k := range keys { - if i > 0 { - out.write(`,`) - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - out.write(m.Indent) - } - - // TODO handle map key prop properly - b, err := json.Marshal(k.Interface()) - if err != nil { - return err - } - s := string(b) - - // If the JSON is not a string value, encode it again to make it one. - if !strings.HasPrefix(s, `"`) { - b, err := json.Marshal(s) - if err != nil { - return err - } - s = string(b) - } - - out.write(s) - out.write(`:`) - if m.Indent != "" { - out.write(` `) - } - - vprop := prop - if prop != nil && prop.MapValProp != nil { - vprop = prop.MapValProp - } - if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { - return err - } - } - if m.Indent != "" { - out.write("\n") - out.write(indent) - out.write(m.Indent) - } - out.write(`}`) - return out.err - } - - // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - f := v.Float() - var sval string - switch { - case math.IsInf(f, 1): - sval = `"Infinity"` - case math.IsInf(f, -1): - sval = `"-Infinity"` - case math.IsNaN(f): - sval = `"NaN"` - } - if sval != "" { - out.write(sval) - return out.err - } - } - - // Default handling defers to the encoding/json library. - b, err := json.Marshal(v.Interface()) - if err != nil { - return err - } - needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) - if needToQuote { - out.write(`"`) - } - out.write(string(b)) - if needToQuote { - out.write(`"`) - } - return out.err -} - -// Unmarshaler is a configurable object for converting from a JSON -// representation to a protocol buffer object. 
-type Unmarshaler struct { - // Whether to allow messages to contain unknown fields, as opposed to - // failing to unmarshal. - AllowUnknownFields bool - - // A custom URL resolver to use when unmarshaling Any messages from JSON. - // If unset, the default resolution strategy is to extract the - // fully-qualified type name from the type URL and pass that to - // proto.MessageType(string). - AnyResolver AnyResolver -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - inputValue := json.RawMessage{} - if err := dec.Decode(&inputValue); err != nil { - return err - } - if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { - return err - } - return checkRequiredFields(pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { - dec := json.NewDecoder(r) - return u.UnmarshalNext(dec, pb) -} - -// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. -// This function is lenient and will decode any options permutations of the -// related Marshaler. -func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { - return new(Unmarshaler).UnmarshalNext(dec, pb) -} - -// Unmarshal unmarshals a JSON object stream into a protocol -// buffer. This function is lenient and will decode any options -// permutations of the related Marshaler. -func Unmarshal(r io.Reader, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(r, pb) -} - -// UnmarshalString will populate the fields of a protocol buffer based -// on a JSON string. This function is lenient and will decode any options -// permutations of the related Marshaler. -func UnmarshalString(str string, pb proto.Message) error { - return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) -} - -// unmarshalValue converts/copies a value into the target. -// prop may be nil. -func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { - targetType := target.Type() - - // Allocate memory for pointer fields. - if targetType.Kind() == reflect.Ptr { - // If input value is "null" and target is a pointer type, then the field should be treated as not set - // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. - _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) - if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { - return nil - } - target.Set(reflect.New(targetType.Elem())) - - return u.unmarshalValue(target.Elem(), inputValue, prop) - } - - if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { - return jsu.UnmarshalJSONPB(u, []byte(inputValue)) - } - - // Handle well-known types that are not pointers. - if w, ok := target.Addr().Interface().(wkt); ok { - switch w.XXX_WellKnownType() { - case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", - "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": - return u.unmarshalValue(target.Field(0), inputValue, prop) - case "Any": - // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
- // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see - // https://github.com/golang/go/issues/14493 - var jsonFields map[string]*json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - val, ok := jsonFields["@type"] - if !ok || val == nil { - return errors.New("Any JSON doesn't have '@type'") - } - - var turl string - if err := json.Unmarshal([]byte(*val), &turl); err != nil { - return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) - } - target.Field(0).SetString(turl) - - var m proto.Message - var err error - if u.AnyResolver != nil { - m, err = u.AnyResolver.Resolve(turl) - } else { - m, err = defaultResolveAny(turl) - } - if err != nil { - return err - } - - if _, ok := m.(wkt); ok { - val, ok := jsonFields["value"] - if !ok { - return errors.New("Any JSON doesn't have 'value'") - } - - if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } else { - delete(jsonFields, "@type") - nestedProto, err := json.Marshal(jsonFields) - if err != nil { - return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) - } - - if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { - return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) - } - } - - b, err := proto.Marshal(m) - if err != nil { - return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) - } - target.Field(1).SetBytes(b) - - return nil - case "Duration": - unq, err := unquote(string(inputValue)) - if err != nil { - return err - } - - d, err := time.ParseDuration(unq) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - target.Field(0).SetInt(s) - target.Field(1).SetInt(ns) - return nil - case "Timestamp": - unq, err := unquote(string(inputValue)) - if err != nil { - return err - } - - t, err := time.Parse(time.RFC3339Nano, unq) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - - target.Field(0).SetInt(t.Unix()) - target.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "Struct": - var m map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &m); err != nil { - return fmt.Errorf("bad StructValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) - for k, jv := range m { - pv := &stpb.Value{} - if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { - return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) - } - target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) - } - return nil - case "ListValue": - var s []json.RawMessage - if err := json.Unmarshal(inputValue, &s); err != nil { - return fmt.Errorf("bad ListValue: %v", err) - } - - target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) - for i, sv := range s { - if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { - return err - } - } - return nil - case "Value": - ivStr := string(inputValue) - if ivStr == "null" { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) - } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) - } else if v, err := unquote(ivStr); err == nil { - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) - } else if v, err := strconv.ParseBool(ivStr); err == nil { - 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) - } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { - lv := &stpb.ListValue{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) - return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) - } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { - sv := &stpb.Struct{} - target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) - return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) - } else { - return fmt.Errorf("unrecognized type for Value %q", ivStr) - } - return nil - } - } - - // Handle enums, which have an underlying type of int32, - // and may appear as strings. - // The case of an enum appearing as a number is handled - // at the bottom of this function. - if inputValue[0] == '"' && prop != nil && prop.Enum != "" { - vmap := proto.EnumValueMap(prop.Enum) - // Don't need to do unquoting; valid enum names - // are from a limited character set. - s := inputValue[1 : len(inputValue)-1] - n, ok := vmap[string(s)] - if !ok { - return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) - } - if target.Kind() == reflect.Ptr { // proto2 - target.Set(reflect.New(targetType.Elem())) - target = target.Elem() - } - if targetType.Kind() != reflect.Int32 { - return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) - } - target.SetInt(int64(n)) - return nil - } - - // Handle nested messages. - if targetType.Kind() == reflect.Struct { - var jsonFields map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &jsonFields); err != nil { - return err - } - - consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { - // Be liberal in what names we accept; both orig_name and camelName are okay. - fieldNames := acceptedJSONFieldNames(prop) - - vOrig, okOrig := jsonFields[fieldNames.orig] - vCamel, okCamel := jsonFields[fieldNames.camel] - if !okOrig && !okCamel { - return nil, false - } - // If, for some reason, both are present in the data, favour the camelName. - var raw json.RawMessage - if okOrig { - raw = vOrig - delete(jsonFields, fieldNames.orig) - } - if okCamel { - raw = vCamel - delete(jsonFields, fieldNames.camel) - } - return raw, true - } - - sprops := proto.GetProperties(targetType) - for i := 0; i < target.NumField(); i++ { - ft := target.Type().Field(i) - if strings.HasPrefix(ft.Name, "XXX_") { - continue - } - - valueForField, ok := consumeField(sprops.Prop[i]) - if !ok { - continue - } - - if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { - return err - } - } - // Check for any oneof fields. - if len(jsonFields) > 0 { - for _, oop := range sprops.OneofTypes { - raw, ok := consumeField(oop.Prop) - if !ok { - continue - } - nv := reflect.New(oop.Type.Elem()) - target.Field(oop.Field).Set(nv) - if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { - return err - } - } - } - // Handle proto2 extensions. 
- if len(jsonFields) > 0 { - if ep, ok := target.Addr().Interface().(proto.Message); ok { - for _, ext := range proto.RegisteredExtensions(ep) { - name := fmt.Sprintf("[%s]", ext.Name) - raw, ok := jsonFields[name] - if !ok { - continue - } - delete(jsonFields, name) - nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) - if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { - return err - } - if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { - return err - } - } - } - } - if !u.AllowUnknownFields && len(jsonFields) > 0 { - // Pick any field to be the scapegoat. - var f string - for fname := range jsonFields { - f = fname - break - } - return fmt.Errorf("unknown field %q in %v", f, targetType) - } - return nil - } - - // Handle arrays (which aren't encoded bytes) - if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { - var slc []json.RawMessage - if err := json.Unmarshal(inputValue, &slc); err != nil { - return err - } - if slc != nil { - l := len(slc) - target.Set(reflect.MakeSlice(targetType, l, l)) - for i := 0; i < l; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err - } - } - } - return nil - } - - // Handle maps (whose keys are always strings) - if targetType.Kind() == reflect.Map { - var mp map[string]json.RawMessage - if err := json.Unmarshal(inputValue, &mp); err != nil { - return err - } - if mp != nil { - target.Set(reflect.MakeMap(targetType)) - for ks, raw := range mp { - // Unmarshal map key. The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. - var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - var kprop *proto.Properties - if prop != nil && prop.MapKeyProp != nil { - kprop = prop.MapKeyProp - } - if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { - return err - } - } - - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - var vprop *proto.Properties - if prop != nil && prop.MapValProp != nil { - vprop = prop.MapValProp - } - if err := u.unmarshalValue(v, raw, vprop); err != nil { - return err - } - target.SetMapIndex(k, v) - } - } - return nil - } - - // Non-finite numbers can be encoded as strings. - isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isFloat { - if num, ok := nonFinite[string(inputValue)]; ok { - target.SetFloat(num) - return nil - } - } - - // integers & floats can be encoded as strings. In this case we drop - // the quotes and proceed as normal. - isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || - targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || - targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 - if isNum && strings.HasPrefix(string(inputValue), `"`) { - inputValue = inputValue[1 : len(inputValue)-1] - } - - // Use the encoding/json for parsing other value types. - return json.Unmarshal(inputValue, target.Addr().Interface()) -} - -func unquote(s string) (string, error) { - var ret string - err := json.Unmarshal([]byte(s), &ret) - return ret, err -} - -// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
-func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { - var prop proto.Properties - prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) - if origName || prop.JSONName == "" { - prop.JSONName = prop.OrigName - } - return &prop -} - -type fieldNames struct { - orig, camel string -} - -func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { - opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} - if prop.JSONName != "" { - opts.camel = prop.JSONName - } - return opts -} - -// Writer wrapper inspired by https://blog.golang.org/errors-are-values -type errWriter struct { - writer io.Writer - err error -} - -func (w *errWriter) write(str string) { - if w.err != nil { - return - } - _, w.err = w.writer.Write([]byte(str)) -} - -// Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. -// -// Numeric keys are sorted in numeric order per -// https://developers.google.com/protocol-buffers/docs/proto#maps. -type mapKeys []reflect.Value - -func (s mapKeys) Len() int { return len(s) } -func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s mapKeys) Less(i, j int) bool { - if k := s[i].Kind(); k == s[j].Kind() { - switch k { - case reflect.String: - return s[i].String() < s[j].String() - case reflect.Int32, reflect.Int64: - return s[i].Int() < s[j].Int() - case reflect.Uint32, reflect.Uint64: - return s[i].Uint() < s[j].Uint() - } - } - return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) -} - -// checkRequiredFields returns an error if any required field in the given proto message is not set. -// This function is used by both Marshal and Unmarshal. While required fields only exist in a -// proto2 message, a proto3 message can contain proto2 message(s). -func checkRequiredFields(pb proto.Message) error { - // Most well-known type messages do not contain required fields. The "Any" type may contain - // a message that has required fields. - // - // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value - // field in order to transform that into JSON, and that should have returned an error if a - // required field is not set in the embedded message. - // - // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the - // embedded message to store the serialized message in Any.Value field, and that should have - // returned an error if a required field is not set. - if _, ok := pb.(wkt); ok { - return nil - } - - v := reflect.ValueOf(pb) - // Skip message if it is not a struct pointer. - if v.Kind() != reflect.Ptr { - return nil - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return nil - } - - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - sfield := v.Type().Field(i) - - if sfield.PkgPath != "" { - // blank PkgPath means the field is exported; skip if not exported - continue - } - - if strings.HasPrefix(sfield.Name, "XXX_") { - continue - } - - // Oneof field is an interface implemented by wrapper structs containing the actual oneof - // field, i.e. an interface containing &T{real_value}. 
- if sfield.Tag.Get("protobuf_oneof") != "" { - if field.Kind() != reflect.Interface { - continue - } - v := field.Elem() - if v.Kind() != reflect.Ptr || v.IsNil() { - continue - } - v = v.Elem() - if v.Kind() != reflect.Struct || v.NumField() < 1 { - continue - } - field = v.Field(0) - sfield = v.Type().Field(0) - } - - protoTag := sfield.Tag.Get("protobuf") - if protoTag == "" { - continue - } - var prop proto.Properties - prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) - - switch field.Kind() { - case reflect.Map: - if field.IsNil() { - continue - } - // Check each map value. - keys := field.MapKeys() - for _, k := range keys { - v := field.MapIndex(k) - if err := checkRequiredFieldsInValue(v); err != nil { - return err - } - } - case reflect.Slice: - // Handle non-repeated type, e.g. bytes. - if !prop.Repeated { - if prop.Required && field.IsNil() { - return fmt.Errorf("required field %q is not set", prop.Name) - } - continue - } - - // Handle repeated type. - if field.IsNil() { - continue - } - // Check each slice item. - for i := 0; i < field.Len(); i++ { - v := field.Index(i) - if err := checkRequiredFieldsInValue(v); err != nil { - return err - } - } - case reflect.Ptr: - if field.IsNil() { - if prop.Required { - return fmt.Errorf("required field %q is not set", prop.Name) - } - continue - } - if err := checkRequiredFieldsInValue(field); err != nil { - return err - } - } - } - - // Handle proto2 extensions. - for _, ext := range proto.RegisteredExtensions(pb) { - if !proto.HasExtension(pb, ext) { - continue - } - ep, err := proto.GetExtension(pb, ext) - if err != nil { - return err - } - err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) - if err != nil { - return err - } - } - - return nil -} - -func checkRequiredFieldsInValue(v reflect.Value) error { - if pm, ok := v.Interface().(proto.Message); ok { - return checkRequiredFields(pm) - } - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/clone.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). 
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. 
- out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/decode.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. 
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. 
-func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). 
- var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/deprecated.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/discard.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index dea2617..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. 
-func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. 
- switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/encode.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. 
-func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. 
-func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/equal.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. 
- -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/extensions.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index fa88add..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,607 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. 
-func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. 
Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. 
-// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. 
-func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} - -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - } - return v -} - -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - } - return v -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/lib.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328b..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/message_set.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a756..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa919..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. 
- // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. 
-// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe0..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/properties.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index 79668ff..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,545 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. 
- -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. 
- p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. 
- p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. 
- protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_marshal.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. 
-func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return 
sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. 
- -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func 
sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. - switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} 
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = 
appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. 
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. -func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). 
- // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. -func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. 
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) 
- continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) 
-} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_merge.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. 
- merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. 
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/text.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee72..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. 
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/text_parser.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/text_parser.go
deleted file mode 100644
index bb55a3a..0000000
--- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,880 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} 
- -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
-	if um, ok := pb.(encoding.TextUnmarshaler); ok {
-		return um.UnmarshalText([]byte(s))
-	}
-	pb.Reset()
-	v := reflect.ValueOf(pb)
-	return newTextParser(s).readStruct(v.Elem(), "")
-}
diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
deleted file mode 100644
index 1ded05b..0000000
--- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ /dev/null
@@ -1,2887 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google/protobuf/descriptor.proto
-
-package descriptor
-
-import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type FieldDescriptorProto_Type int32
-
-const (
-	// 0 is reserved for errors.
-	// Order is weird for historical reasons.
-	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
-	FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2
-	// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
-	// negative values are likely.
-	FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3
-	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
-	// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
-	// negative values are likely.
-	FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5
-	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
-	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
-	FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
-	FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
-	// Tag-delimited aggregate.
-	// Group type is deprecated and not supported in proto3. However, Proto3
-	// implementations should still be able to parse the group wire format and
-	// treat group fields as unknown fields.
-	FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
-	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
-	// New in version 2.
- FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} - -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} - -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} - -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} - -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} - -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} - -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. 
- FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} - -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} - -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} - -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} - -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} - -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} - -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. - FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} - -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} - -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} - -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. 
-type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} - -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} - -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} - -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{0} -} - -func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) -} -func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) -} -func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(m, src) -} -func (m *FileDescriptorSet) XXX_Size() int { - return xxx_messageInfo_FileDescriptorSet.Size(m) -} -func (m *FileDescriptorSet) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. 
- MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{1} -} - -func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) -} -func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(m, src) -} -func (m *FileDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FileDescriptorProto.Size(m) -} -func (m *FileDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - 
return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. -type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2} -} - -func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) -} -func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) -} -func (m *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(m, src) -} -func (m *DescriptorProto) XXX_Size() int { - return xxx_messageInfo_DescriptorProto.Size(m) -} -func (m *DescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - 
-func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2, 0} -} - -func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) -} -func (m *DescriptorProto_ExtensionRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) -} -func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. 
-type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2, 1} -} - -func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) -} -func (m *DescriptorProto_ReservedRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) -} -func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{3} -} - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) -} -func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) -} -func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) -} -func (m *ExtensionRangeOptions) XXX_Size() int { - return xxx_messageInfo_ExtensionRangeOptions.Size(m) -} -func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. 
This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4} -} - -func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) -} -func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(m, src) -} -func (m *FieldDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FieldDescriptorProto.Size(m) -} -func (m *FieldDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{5} -} - -func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) -} -func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) -} -func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(m, src) -} -func (m *OneofDescriptorProto) XXX_Size() int { - return xxx_messageInfo_OneofDescriptorProto.Size(m) -} -func (m *OneofDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{6} -} - -func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) -} -func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(m, src) -} -func (m *EnumDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto.Size(m) -} -func (m *EnumDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. 
-type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } -func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{6, 0} -} - -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo - -func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{7} -} - -func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) -} -func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) -} -func (m *EnumValueDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumValueDescriptorProto.Size(m) -} -func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{8} -} - -func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) -} -func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) -} -func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) -} -func (m *ServiceDescriptorProto) XXX_Size() int { - return xxx_messageInfo_ServiceDescriptorProto.Size(m) -} -func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{9} -} - -func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) -} -func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) -} -func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(m, src) -} -func (m *MethodDescriptorProto) XXX_Size() int { - return xxx_messageInfo_MethodDescriptorProto.Size(m) -} -func (m *MethodDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be used - // for determining the namespace. - PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{10} -} - -var extRange_FileOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -func (m *FileOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileOptions.Unmarshal(m, b) -} -func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) -} -func (m *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(m, src) -} -func (m *FileOptions) XXX_Size() int { - return xxx_messageInfo_FileOptions.Size(m) -} -func (m *FileOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FileOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOptions proto.InternalMessageInfo - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. 
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetPhpMetadataNamespace() string { - if m != nil && m.PhpMetadataNamespace != nil { - return *m.PhpMetadataNamespace - } - return "" -} - -func (m *FileOptions) GetRubyPackage() string { - if m != nil && m.RubyPackage != nil { - return *m.RubyPackage - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. 
they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{11} -} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -func (m *MessageOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageOptions.Unmarshal(m, b) -} -func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) -} -func (m *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(m, src) -} -func (m *MessageOptions) XXX_Size() int { - return xxx_messageInfo_MessageOptions.Size(m) -} -func (m *MessageOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MessageOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageOptions proto.InternalMessageInfo - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12} -} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} - -func (m *FieldOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldOptions.Unmarshal(m, b) -} -func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) -} -func (m *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(m, src) -} -func (m *FieldOptions) XXX_Size() int { - return xxx_messageInfo_FieldOptions.Size(m) -} -func (m *FieldOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FieldOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldOptions proto.InternalMessageInfo - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{13} -} - -var extRange_OneofOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofOptions.Unmarshal(m, b) -} -func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) -} -func (m *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(m, src) -} -func (m *OneofOptions) XXX_Size() int { - return xxx_messageInfo_OneofOptions.Size(m) -} -func (m *OneofOptions) XXX_DiscardUnknown() { - xxx_messageInfo_OneofOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofOptions proto.InternalMessageInfo - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{14} -} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -func (m *EnumOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumOptions.Unmarshal(m, b) -} -func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) -} -func (m *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(m, src) -} -func (m *EnumOptions) XXX_Size() int { - return xxx_messageInfo_EnumOptions.Size(m) -} -func (m *EnumOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumOptions proto.InternalMessageInfo - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{15} -} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) -} -func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) -} -func (m *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(m, src) -} -func (m *EnumValueOptions) XXX_Size() int { - return xxx_messageInfo_EnumValueOptions.Size(m) -} -func (m *EnumValueOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{16} -} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) -} -func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) -} -func (m *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(m, src) -} -func (m *ServiceOptions) XXX_Size() int { - return xxx_messageInfo_ServiceOptions.Size(m) -} -func (m *ServiceOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{17} -} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -func (m *MethodOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodOptions.Unmarshal(m, b) -} -func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) -} -func (m *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(m, src) -} -func (m *MethodOptions) XXX_Size() int { - return xxx_messageInfo_MethodOptions.Size(m) -} -func (m *MethodOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MethodOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodOptions proto.InternalMessageInfo - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{18} -} - -func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) -} -func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(m, src) -} -func (m *UninterpretedOption) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption.Size(m) -} -func (m *UninterpretedOption) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". 
-type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{18, 0} -} - -func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) -} -func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) -} -func (m *UninterpretedOption_NamePart) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) -} -func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. 
For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{19} -} - -func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) -} -func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(m, src) -} -func (m *SourceCodeInfo) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo.Size(m) -} -func (m *SourceCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. 
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{19, 0} -} - -func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) -} -func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) -} -func (m *SourceCodeInfo_Location) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo_Location.Size(m) -} -func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{20} -} - -func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) -} -func (m *GeneratedCodeInfo) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo.Size(m) -} -func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo - -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{20, 0} -} - -func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) -} -func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") -} - -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) } - -var fileDescriptor_e5baabe45344a177 = []byte{ - // 2589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca, - 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee, - 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca, - 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80, - 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c, - 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73, - 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04, - 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a, - 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0, - 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52, - 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90, - 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88, - 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd, - 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c, - 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6, - 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf, - 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79, - 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 
0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11, - 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53, - 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84, - 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4, - 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e, - 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9, - 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2, - 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02, - 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6, - 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d, - 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33, - 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79, - 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a, - 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a, - 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c, - 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56, - 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06, - 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1, - 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23, - 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01, - 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f, - 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d, - 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58, - 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32, - 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e, - 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb, - 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11, - 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02, - 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f, - 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31, - 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac, - 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f, - 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d, - 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac, - 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e, - 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72, - 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 
0xd6, 0x9d, 0x5b, - 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2, - 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e, - 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94, - 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a, - 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61, - 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0, - 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5, - 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a, - 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a, - 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8, - 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64, - 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c, - 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8, - 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb, - 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2, - 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33, - 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4, - 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15, - 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39, - 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9, - 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41, - 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40, - 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35, - 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0, - 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4, - 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53, - 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e, - 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d, - 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e, - 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f, - 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36, - 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60, - 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1, - 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d, - 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45, - 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58, - 0xbe, 
0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3, - 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c, - 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87, - 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49, - 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26, - 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39, - 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c, - 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77, - 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83, - 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87, - 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20, - 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6, - 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e, - 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c, - 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0, - 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8, - 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0, - 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3, - 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1, - 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53, - 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42, - 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b, - 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15, - 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae, - 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2, - 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35, - 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0, - 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82, - 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a, - 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c, - 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5, - 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29, - 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca, - 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd, - 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18, - 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb, - 0x39, 0x37, 0xa5, 0x1f, 0xd2, 
0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae, - 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6, - 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a, - 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47, - 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8, - 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e, - 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0, - 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70, - 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41, - 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e, - 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c, - 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47, - 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2, - 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66, - 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2, - 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0, - 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9, - 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40, - 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c, - 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe, - 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d, - 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99, - 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69, - 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2, - 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda, - 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86, - 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24, - 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8, - 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25, - 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e, - 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee, - 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39, - 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f, - 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto 
b/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto deleted file mode 100644 index ed08fcb..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ /dev/null @@ -1,883 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - - -syntax = "proto2"; - -package google.protobuf; -option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. - repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. 
- repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. -message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; - optional int32 end = 2; - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - }; - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - }; - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. 
If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. -message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; - - // Range of reserved numeric values. Reserved values may not be used by - // entries in the same enum. Reserved ranges may not overlap. - // - // Note that this is distinct from DescriptorProto.ReservedRange in that it - // is inclusive such that it can appropriately represent the entire int32 - // domain. - message EnumReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Inclusive. - } - - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - repeated EnumReservedRange reserved_range = 4; - - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - repeated string reserved_name = 5; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. -message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default=false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default=false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. 
These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. -// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. -// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. -// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default=false]; - - // This option does nothing. - optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default=false]; - - - // Generated classes can be optimized for speed or code size. 
- enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default=SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - optional string go_package = 11; - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - optional bool cc_generic_services = 16 [default=false]; - optional bool java_generic_services = 17 [default=false]; - optional bool py_generic_services = 18 [default=false]; - optional bool php_generic_services = 42 [default=false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default=false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default=false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. - optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - optional string php_namespace = 41; - - - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be used - // for determining the namespace. - optional string php_metadata_namespace = 44; - - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. 
- optional string ruby_package = 45; - - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. - // See the documentation for the "Options" section above. - extensions 1000 to max; - - reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default=false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default=false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default=false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - reserved 8; // javalite_serializable - reserved 9; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. 
- STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - optional bool lazy = 5 [default=false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default=false]; - - // For Google-internal migration only. Do not use. 
- optional bool weak = 10 [default=false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - optional bool deprecated = 3 [default=false]; - - reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default=false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. 
- enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = - 34 [default=IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". - message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. 
- // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - repeated int32 path = 1 [packed=true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed=true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. 
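The path arithmetic described above is mechanical; the rough sketch below decodes the exact example path [4, 3, 2, 7, 1] using only the three field numbers quoted in the comment, so it is an illustration rather than a general resolver:

    package main

    import "fmt"

    // describePath renders a SourceCodeInfo path such as [4, 3, 2, 7, 1] in the
    // dotted form used above. It only knows FileDescriptorProto.message_type (4),
    // DescriptorProto.field (2) and FieldDescriptorProto.name (1).
    func describePath(path []int32) string {
        out := "file"
        for i := 0; i < len(path); i++ {
            switch path[i] {
            case 4, 2: // message_type and field are repeated: an index follows
                label := map[int32]string{4: "message_type", 2: "field"}[path[i]]
                if i+1 < len(path) {
                    out += fmt.Sprintf(".%s(%d)", label, path[i+1])
                    i++
                }
            case 1: // FieldDescriptorProto.name: no index follows
                out += ".name()"
            default:
                out += fmt.Sprintf(".<%d>", path[i])
            }
        }
        return out
    }

    func main() {
        fmt.Println(describePath([]int32{4, 3, 2, 7, 1})) // file.message_type(3).field(7).name()
    }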
- // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed=true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go deleted file mode 100644 index 6f4a902..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ /dev/null @@ -1,2806 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* - The code generator for the plugin for the Google protocol buffer compiler. - It generates Go code from the protocol buffer description files read by the - main routine. -*/ -package generator - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/hex" - "fmt" - "go/ast" - "go/build" - "go/parser" - "go/printer" - "go/token" - "log" - "os" - "path" - "sort" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" - - "github.com/golang/protobuf/protoc-gen-go/descriptor" - plugin "github.com/golang/protobuf/protoc-gen-go/plugin" -) - -// generatedCodeVersion indicates a version of the generated code. -// It is incremented whenever an incompatibility between the generated code and -// proto package is introduced; the generated code references -// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). -const generatedCodeVersion = 3 - -// A Plugin provides functionality to add to the output during Go code generation, -// such as to produce RPC stubs. -type Plugin interface { - // Name identifies the plugin. - Name() string - // Init is called once after data structures are built but before - // code generation begins. - Init(g *Generator) - // Generate produces the code generated by the plugin for this file, - // except for the imports, by calling the generator's methods P, In, and Out. - Generate(file *FileDescriptor) - // GenerateImports produces the import declarations for this file. - // It is called after Generate. - GenerateImports(file *FileDescriptor) -} - -var plugins []Plugin - -// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. -// It is typically called during initialization. -func RegisterPlugin(p Plugin) { - plugins = append(plugins, p) -} - -// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". -type GoImportPath string - -func (p GoImportPath) String() string { return strconv.Quote(string(p)) } - -// A GoPackageName is the name of a Go package. e.g., "protobuf". -type GoPackageName string - -// Each type we import as a protocol buffer (other than FileDescriptorProto) needs -// a pointer to the FileDescriptorProto that represents it. These types achieve that -// wrapping by placing each Proto inside a struct with the pointer to its File. The -// structs have the same names as their contents, with "Proto" removed. -// FileDescriptor is used to store the things that it points to. - -// The file and package name method are common to messages and enums. -type common struct { - file *FileDescriptor // File this object comes from. -} - -// GoImportPath is the import path of the Go package containing the type. 
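For context, the Plugin interface above is the hook used by second-order plugins such as the old gRPC plugin. A minimal illustrative plugin might look like the sketch below; the package name is assumed, and the plugin does nothing useful beyond emitting a comment per file:

    package myplugin

    import "github.com/golang/protobuf/protoc-gen-go/generator"

    // noopPlugin is an illustrative second-order plugin. Real plugins (for
    // example the old gRPC plugin) follow exactly this shape.
    type noopPlugin struct {
        gen *generator.Generator
    }

    func (p *noopPlugin) Name() string { return "noop" }

    // Init runs once, after the generator's data structures are built.
    func (p *noopPlugin) Init(g *generator.Generator) { p.gen = g }

    // Generate is called per file; P writes a line into the output buffer.
    func (p *noopPlugin) Generate(file *generator.FileDescriptor) {
        p.gen.P("// processed by the noop plugin: ", file.GetName())
    }

    // GenerateImports is called after Generate to emit extra imports (none here).
    func (p *noopPlugin) GenerateImports(file *generator.FileDescriptor) {}

    func init() {
        generator.RegisterPlugin(new(noopPlugin))
    }

Such a plugin would then be enabled through the plugins= parameter handled by CommandLineParameters further down.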
-func (c *common) GoImportPath() GoImportPath { - return c.file.importPath -} - -func (c *common) File() *FileDescriptor { return c.file } - -func fileIsProto3(file *descriptor.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } - -// Descriptor represents a protocol buffer message. -type Descriptor struct { - common - *descriptor.DescriptorProto - parent *Descriptor // The containing message, if any. - nested []*Descriptor // Inner messages, if any. - enums []*EnumDescriptor // Inner enums, if any. - ext []*ExtensionDescriptor // Extensions, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or another message. - path string // The SourceCodeInfo path as comma-separated integers. - group bool -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (d *Descriptor) TypeName() []string { - if d.typename != nil { - return d.typename - } - n := 0 - for parent := d; parent != nil; parent = parent.parent { - n++ - } - s := make([]string, n) - for parent := d; parent != nil; parent = parent.parent { - n-- - s[n] = parent.GetName() - } - d.typename = s - return s -} - -// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type EnumDescriptor struct { - common - *descriptor.EnumDescriptorProto - parent *Descriptor // The containing message, if any. - typename []string // Cached typename vector. - index int // The index into the container, whether the file or a message. - path string // The SourceCodeInfo path as comma-separated integers. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. -func (e *EnumDescriptor) TypeName() (s []string) { - if e.typename != nil { - return e.typename - } - name := e.GetName() - if e.parent == nil { - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - e.typename = s - return s -} - -// Everything but the last element of the full type name, CamelCased. -// The values of type Foo.Bar are call Foo_value1... not Foo_Bar_value1... . -func (e *EnumDescriptor) prefix() string { - if e.parent == nil { - // If the enum is not part of a message, the prefix is just the type name. - return CamelCase(*e.Name) + "_" - } - typeName := e.TypeName() - return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" -} - -// The integer value of the named constant in this enumerated type. -func (e *EnumDescriptor) integerValueAsString(name string) string { - for _, c := range e.Value { - if c.GetName() == name { - return fmt.Sprint(c.GetNumber()) - } - } - log.Fatal("cannot find value for enum constant") - return "" -} - -// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. -// Otherwise it will be the descriptor of the message in which it is defined. -type ExtensionDescriptor struct { - common - *descriptor.FieldDescriptorProto - parent *Descriptor // The containing message, if any. -} - -// TypeName returns the elements of the dotted type name. -// The package name is not part of this name. 
-func (e *ExtensionDescriptor) TypeName() (s []string) { - name := e.GetName() - if e.parent == nil { - // top-level extension - s = make([]string, 1) - } else { - pname := e.parent.TypeName() - s = make([]string, len(pname)+1) - copy(s, pname) - } - s[len(s)-1] = name - return s -} - -// DescName returns the variable name used for the generated descriptor. -func (e *ExtensionDescriptor) DescName() string { - // The full type name. - typeName := e.TypeName() - // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. - for i, s := range typeName { - typeName[i] = CamelCase(s) - } - return "E_" + strings.Join(typeName, "_") -} - -// ImportedDescriptor describes a type that has been publicly imported from another file. -type ImportedDescriptor struct { - common - o Object -} - -func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } - -// FileDescriptor describes an protocol buffer descriptor file (.proto). -// It includes slices of all the messages and enums defined within it. -// Those slices are constructed by WrapTypes. -type FileDescriptor struct { - *descriptor.FileDescriptorProto - desc []*Descriptor // All the messages defined in this file. - enum []*EnumDescriptor // All the enums defined in this file. - ext []*ExtensionDescriptor // All the top-level extensions defined in this file. - imp []*ImportedDescriptor // All types defined in files publicly imported by this file. - - // Comments, stored as a map of path (comma-separated integers) to the comment. - comments map[string]*descriptor.SourceCodeInfo_Location - - // The full list of symbols that are exported, - // as a map from the exported object to its symbols. - // This is used for supporting public imports. - exported map[Object][]symbol - - importPath GoImportPath // Import path of this file's package. - packageName GoPackageName // Name of this file's Go package. - - proto3 bool // whether to generate proto3 code for this file -} - -// VarName is the variable name we'll use in the generated code to refer -// to the compressed bytes of this descriptor. It is not exported, so -// it is only valid inside the generated package. -func (d *FileDescriptor) VarName() string { - h := sha256.Sum256([]byte(d.GetName())) - return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8])) -} - -// goPackageOption interprets the file's go_package option. -// If there is no go_package, it returns ("", "", false). -// If there's a simple name, it returns ("", pkg, true). -// If the option implies an import path, it returns (impPath, pkg, true). -func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { - opt := d.GetOptions().GetGoPackage() - if opt == "" { - return "", "", false - } - // A semicolon-delimited suffix delimits the import path and package name. - sc := strings.Index(opt, ";") - if sc >= 0 { - return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true - } - // The presence of a slash implies there's an import path. - slash := strings.LastIndex(opt, "/") - if slash >= 0 { - return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true - } - return "", cleanPackageName(opt), true -} - -// goFileName returns the output name for the generated Go file. 
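The three go_package forms accepted by goPackageOption above are easy to conflate; this standalone sketch (the function name is invented, and the package-name cleanup step is omitted) replays the same decision order on a few inputs:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitGoPackage mimics goPackageOption above: "path;name" gives both an
    // import path and a package name, a value containing "/" implies an import
    // path whose last element is the package name, and a bare name is just a
    // package name.
    func splitGoPackage(opt string) (impPath, pkg string, ok bool) {
        if opt == "" {
            return "", "", false
        }
        if sc := strings.Index(opt, ";"); sc >= 0 {
            return opt[:sc], opt[sc+1:], true
        }
        if slash := strings.LastIndex(opt, "/"); slash >= 0 {
            return opt, opt[slash+1:], true
        }
        return "", opt, true
    }

    func main() {
        for _, opt := range []string{"quux/bar", "quux/bar;baz", "baz", ""} {
            impPath, pkg, ok := splitGoPackage(opt)
            fmt.Printf("%-14q -> path=%-10q pkg=%-6q ok=%v\n", opt, impPath, pkg, ok)
        }
    }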
-func (d *FileDescriptor) goFileName(pathType pathType) string { - name := *d.Name - if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { - name = name[:len(name)-len(ext)] - } - name += ".pb.go" - - if pathType == pathTypeSourceRelative { - return name - } - - // Does the file have a "go_package" option? - // If it does, it may override the filename. - if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { - // Replace the existing dirname with the declared import path. - _, name = path.Split(name) - name = path.Join(string(impPath), name) - return name - } - - return name -} - -func (d *FileDescriptor) addExport(obj Object, sym symbol) { - d.exported[obj] = append(d.exported[obj], sym) -} - -// symbol is an interface representing an exported Go symbol. -type symbol interface { - // GenerateAlias should generate an appropriate alias - // for the symbol from the named package. - GenerateAlias(g *Generator, filename string, pkg GoPackageName) -} - -type messageSymbol struct { - sym string - hasExtensions, isMessageSet bool - oneofTypes []string -} - -type getterSymbol struct { - name string - typ string - typeName string // canonical name in proto world; empty for proto.Message and similar - genType bool // whether typ contains a generated type (message/group/enum) -} - -func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - g.P("// ", ms.sym, " from public import ", filename) - g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) - for _, name := range ms.oneofTypes { - g.P("type ", name, " = ", pkg, ".", name) - } -} - -type enumSymbol struct { - name string - proto3 bool // Whether this came from a proto3 file. -} - -func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - s := es.name - g.P("// ", s, " from public import ", filename) - g.P("type ", s, " = ", pkg, ".", s) - g.P("var ", s, "_name = ", pkg, ".", s, "_name") - g.P("var ", s, "_value = ", pkg, ".", s, "_value") -} - -type constOrVarSymbol struct { - sym string - typ string // either "const" or "var" - cast string // if non-empty, a type cast is required (used for enums) -} - -func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { - v := string(pkg) + "." + cs.sym - if cs.cast != "" { - v = cs.cast + "(" + v + ")" - } - g.P(cs.typ, " ", cs.sym, " = ", v) -} - -// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. -type Object interface { - GoImportPath() GoImportPath - TypeName() []string - File() *FileDescriptor -} - -// Generator is the type whose methods generate the output, stored in the associated response structure. -type Generator struct { - *bytes.Buffer - - Request *plugin.CodeGeneratorRequest // The input. - Response *plugin.CodeGeneratorResponse // The output. - - Param map[string]string // Command-line parameters. - PackageImportPath string // Go import path of the package we're generating code for - ImportPrefix string // String to prefix to imported package file names. - ImportMap map[string]string // Mapping from .proto file name to import path - - Pkg map[string]string // The names under which we import support packages - - outputImportPath GoImportPath // Package we're generating code for. - allFiles []*FileDescriptor // All files in the tree - allFilesByName map[string]*FileDescriptor // All files by filename. - genFiles []*FileDescriptor // Those files we will generate output for. 
- file *FileDescriptor // The file we are compiling now. - packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. - usedPackages map[GoImportPath]bool // Packages used in current file. - usedPackageNames map[GoPackageName]bool // Package names used in the current file. - addedImports map[GoImportPath]bool // Additional imports to emit. - typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. - init []string // Lines to emit in the init function. - indent string - pathType pathType // How to generate output filenames. - writeOutput bool - annotateCode bool // whether to store annotations - annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store -} - -type pathType int - -const ( - pathTypeImport pathType = iota - pathTypeSourceRelative -) - -// New creates a new generator and allocates the request and response protobufs. -func New() *Generator { - g := new(Generator) - g.Buffer = new(bytes.Buffer) - g.Request = new(plugin.CodeGeneratorRequest) - g.Response = new(plugin.CodeGeneratorResponse) - return g -} - -// Error reports a problem, including an error, and exits the program. -func (g *Generator) Error(err error, msgs ...string) { - s := strings.Join(msgs, " ") + ":" + err.Error() - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// Fail reports a problem and exits the program. -func (g *Generator) Fail(msgs ...string) { - s := strings.Join(msgs, " ") - log.Print("protoc-gen-go: error:", s) - os.Exit(1) -} - -// CommandLineParameters breaks the comma-separated list of key=value pairs -// in the parameter (a member of the request protobuf) into a key/value map. -// It then sets file name mappings defined by those entries. -func (g *Generator) CommandLineParameters(parameter string) { - g.Param = make(map[string]string) - for _, p := range strings.Split(parameter, ",") { - if i := strings.Index(p, "="); i < 0 { - g.Param[p] = "" - } else { - g.Param[p[0:i]] = p[i+1:] - } - } - - g.ImportMap = make(map[string]string) - pluginList := "none" // Default list of plugin names to enable (empty means all). - for k, v := range g.Param { - switch k { - case "import_prefix": - g.ImportPrefix = v - case "import_path": - g.PackageImportPath = v - case "paths": - switch v { - case "import": - g.pathType = pathTypeImport - case "source_relative": - g.pathType = pathTypeSourceRelative - default: - g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) - } - case "plugins": - pluginList = v - case "annotate_code": - if v == "true" { - g.annotateCode = true - } - default: - if len(k) > 0 && k[0] == 'M' { - g.ImportMap[k[1:]] = v - } - } - } - if pluginList != "" { - // Amend the set of plugins. - enabled := make(map[string]bool) - for _, name := range strings.Split(pluginList, "+") { - enabled[name] = true - } - var nplugins []Plugin - for _, p := range plugins { - if enabled[p.Name()] { - nplugins = append(nplugins, p) - } - } - plugins = nplugins - } -} - -// DefaultPackageName returns the package name printed for the object. -// If its file is in a different package, it returns the package name we're using for this file, plus ".". -// Otherwise it returns the empty string. -func (g *Generator) DefaultPackageName(obj Object) string { - importPath := obj.GoImportPath() - if importPath == g.outputImportPath { - return "" - } - return string(g.GoPackageName(importPath)) + "." -} - -// GoPackageName returns the name used for a package. 
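As a rough illustration of the parameter splitting done by CommandLineParameters above, the sketch below parses a typical protoc parameter string; the helper name and the sample parameter value are invented for the example:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseParams mirrors the splitting in CommandLineParameters above: the
    // protoc parameter is a comma-separated list of key or key=value pairs,
    // and keys starting with 'M' map a .proto file name to an import path.
    func parseParams(parameter string) (params, importMap map[string]string) {
        params = make(map[string]string)
        importMap = make(map[string]string)
        for _, p := range strings.Split(parameter, ",") {
            if i := strings.Index(p, "="); i < 0 {
                params[p] = ""
            } else {
                params[p[:i]] = p[i+1:]
            }
        }
        for k, v := range params {
            if len(k) > 0 && k[0] == 'M' {
                importMap[k[1:]] = v
            }
        }
        return params, importMap
    }

    func main() {
        params, importMap := parseParams("plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo")
        fmt.Println(params["plugins"], params["paths"]) // grpc source_relative
        fmt.Println(importMap["foo.proto"])             // example.com/foo
    }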
-func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { - if name, ok := g.packageNames[importPath]; ok { - return name - } - name := cleanPackageName(baseName(string(importPath))) - for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - g.packageNames[importPath] = name - g.usedPackageNames[name] = true - return name -} - -// AddImport adds a package to the generated file's import section. -// It returns the name used for the package. -func (g *Generator) AddImport(importPath GoImportPath) GoPackageName { - g.addedImports[importPath] = true - return g.GoPackageName(importPath) -} - -var globalPackageNames = map[GoPackageName]bool{ - "fmt": true, - "math": true, - "proto": true, -} - -// Create and remember a guaranteed unique package name. Pkg is the candidate name. -// The FileDescriptor parameter is unused. -func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { - name := cleanPackageName(pkg) - for i, orig := 1, name; globalPackageNames[name]; i++ { - name = orig + GoPackageName(strconv.Itoa(i)) - } - globalPackageNames[name] = true - return string(name) -} - -var isGoKeyword = map[string]bool{ - "break": true, - "case": true, - "chan": true, - "const": true, - "continue": true, - "default": true, - "else": true, - "defer": true, - "fallthrough": true, - "for": true, - "func": true, - "go": true, - "goto": true, - "if": true, - "import": true, - "interface": true, - "map": true, - "package": true, - "range": true, - "return": true, - "select": true, - "struct": true, - "switch": true, - "type": true, - "var": true, -} - -var isGoPredeclaredIdentifier = map[string]bool{ - "append": true, - "bool": true, - "byte": true, - "cap": true, - "close": true, - "complex": true, - "complex128": true, - "complex64": true, - "copy": true, - "delete": true, - "error": true, - "false": true, - "float32": true, - "float64": true, - "imag": true, - "int": true, - "int16": true, - "int32": true, - "int64": true, - "int8": true, - "iota": true, - "len": true, - "make": true, - "new": true, - "nil": true, - "panic": true, - "print": true, - "println": true, - "real": true, - "recover": true, - "rune": true, - "string": true, - "true": true, - "uint": true, - "uint16": true, - "uint32": true, - "uint64": true, - "uint8": true, - "uintptr": true, -} - -func cleanPackageName(name string) GoPackageName { - name = strings.Map(badToUnderscore, name) - // Identifier must not be keyword or predeclared identifier: insert _. - if isGoKeyword[name] { - name = "_" + name - } - // Identifier must not begin with digit: insert _. - if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { - name = "_" + name - } - return GoPackageName(name) -} - -// defaultGoPackage returns the package name to use, -// derived from the import path of the package we're building code for. -func (g *Generator) defaultGoPackage() GoPackageName { - p := g.PackageImportPath - if i := strings.LastIndex(p, "/"); i >= 0 { - p = p[i+1:] - } - return cleanPackageName(p) -} - -// SetPackageNames sets the package name for this run. -// The package name must agree across all files being generated. -// It also defines unique package names for all imported files. 
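cleanPackageName above leans on badToUnderscore, which is defined elsewhere in this file, so the following sketch substitutes a simple rune filter; it only approximates the real sanitization, but it shows the keyword, predeclared-identifier and leading-digit rules in action:

    package main

    import (
        "fmt"
        "unicode"
    )

    // sanitize approximates cleanPackageName above: non-identifier runes become
    // '_', Go keywords or predeclared identifiers and names starting with a
    // digit get a leading '_'. The reserved set here is a small subset only.
    func sanitize(name string) string {
        out := []rune{}
        for _, r := range name {
            if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
                out = append(out, r)
            } else {
                out = append(out, '_')
            }
        }
        name = string(out)
        reserved := map[string]bool{"func": true, "type": true, "string": true, "len": true}
        if reserved[name] {
            name = "_" + name
        }
        if r := []rune(name); len(r) > 0 && unicode.IsDigit(r[0]) {
            name = "_" + name
        }
        return name
    }

    func main() {
        for _, n := range []string{"my-pkg", "func", "123abc", "proto"} {
            fmt.Printf("%q -> %q\n", n, sanitize(n))
        }
    }

In the generator itself a surviving clash (for example with the global "proto") would additionally pick up a numeric suffix via GoPackageName or RegisterUniquePackageName.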
-func (g *Generator) SetPackageNames() { - g.outputImportPath = g.genFiles[0].importPath - - defaultPackageNames := make(map[GoImportPath]GoPackageName) - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - defaultPackageNames[f.importPath] = p - } - } - for _, f := range g.genFiles { - if _, p, ok := f.goPackageOption(); ok { - // Source file: option go_package = "quux/bar"; - f.packageName = p - } else if p, ok := defaultPackageNames[f.importPath]; ok { - // A go_package option in another file in the same package. - // - // This is a poor choice in general, since every source file should - // contain a go_package option. Supported mainly for historical - // compatibility. - f.packageName = p - } else if p := g.defaultGoPackage(); p != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets a package name for files which don't - // contain a go_package option. - f.packageName = p - } else if p := f.GetPackage(); p != "" { - // Source file: package quux.bar; - f.packageName = cleanPackageName(p) - } else { - // Source filename. - f.packageName = cleanPackageName(baseName(f.GetName())) - } - } - - // Check that all files have a consistent package name and import path. - for _, f := range g.genFiles[1:] { - if a, b := g.genFiles[0].importPath, f.importPath; a != b { - g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) - } - if a, b := g.genFiles[0].packageName, f.packageName; a != b { - g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) - } - } - - // Names of support packages. These never vary (if there are conflicts, - // we rename the conflicting package), so this could be removed someday. - g.Pkg = map[string]string{ - "fmt": "fmt", - "math": "math", - "proto": "proto", - } -} - -// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos -// and FileDescriptorProtos into file-referenced objects within the Generator. -// It also creates the list of files to generate and so should be called before GenerateAllFiles. -func (g *Generator) WrapTypes() { - g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) - g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) - genFileNames := make(map[string]bool) - for _, n := range g.Request.FileToGenerate { - genFileNames[n] = true - } - for _, f := range g.Request.ProtoFile { - fd := &FileDescriptor{ - FileDescriptorProto: f, - exported: make(map[Object][]symbol), - proto3: fileIsProto3(f), - } - // The import path may be set in a number of ways. - if substitution, ok := g.ImportMap[f.GetName()]; ok { - // Command-line: M=foo.proto=quux/bar. - // - // Explicit mapping of source file to import path. - fd.importPath = GoImportPath(substitution) - } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { - // Command-line: import_path=quux/bar. - // - // The import_path flag sets the import path for every file that - // we generate code for. - fd.importPath = GoImportPath(g.PackageImportPath) - } else if p, _, _ := fd.goPackageOption(); p != "" { - // Source file: option go_package = "quux/bar"; - // - // The go_package option sets the import path. Most users should use this. - fd.importPath = p - } else { - // Source filename. - // - // Last resort when nothing else is available. 
- fd.importPath = GoImportPath(path.Dir(f.GetName())) - } - // We must wrap the descriptors before we wrap the enums - fd.desc = wrapDescriptors(fd) - g.buildNestedDescriptors(fd.desc) - fd.enum = wrapEnumDescriptors(fd, fd.desc) - g.buildNestedEnums(fd.desc, fd.enum) - fd.ext = wrapExtensions(fd) - extractComments(fd) - g.allFiles = append(g.allFiles, fd) - g.allFilesByName[f.GetName()] = fd - } - for _, fd := range g.allFiles { - fd.imp = wrapImported(fd, g) - } - - g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) - for _, fileName := range g.Request.FileToGenerate { - fd := g.allFilesByName[fileName] - if fd == nil { - g.Fail("could not find file named", fileName) - } - g.genFiles = append(g.genFiles, fd) - } -} - -// Scan the descriptors in this file. For each one, build the slice of nested descriptors -func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { - for _, desc := range descs { - if len(desc.NestedType) != 0 { - for _, nest := range descs { - if nest.parent == desc { - desc.nested = append(desc.nested, nest) - } - } - if len(desc.nested) != len(desc.NestedType) { - g.Fail("internal error: nesting failure for", desc.GetName()) - } - } - } -} - -func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { - for _, desc := range descs { - if len(desc.EnumType) != 0 { - for _, enum := range enums { - if enum.parent == desc { - desc.enums = append(desc.enums, enum) - } - } - if len(desc.enums) != len(desc.EnumType) { - g.Fail("internal error: enum nesting failure for", desc.GetName()) - } - } - } -} - -// Construct the Descriptor -func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { - d := &Descriptor{ - common: common{file}, - DescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - d.path = fmt.Sprintf("%d,%d", messagePath, index) - } else { - d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) - } - - // The only way to distinguish a group from a message is whether - // the containing message has a TYPE_GROUP field that matches. - if parent != nil { - parts := d.TypeName() - if file.Package != nil { - parts = append([]string{*file.Package}, parts...) - } - exp := "." 
+ strings.Join(parts, ".") - for _, field := range parent.Field { - if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { - d.group = true - break - } - } - } - - for _, field := range desc.Extension { - d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) - } - - return d -} - -// Return a slice of all the Descriptors defined within this file -func wrapDescriptors(file *FileDescriptor) []*Descriptor { - sl := make([]*Descriptor, 0, len(file.MessageType)+10) - for i, desc := range file.MessageType { - sl = wrapThisDescriptor(sl, desc, nil, file, i) - } - return sl -} - -// Wrap this Descriptor, recursively -func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { - sl = append(sl, newDescriptor(desc, parent, file, index)) - me := sl[len(sl)-1] - for i, nested := range desc.NestedType { - sl = wrapThisDescriptor(sl, nested, me, file, i) - } - return sl -} - -// Construct the EnumDescriptor -func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { - ed := &EnumDescriptor{ - common: common{file}, - EnumDescriptorProto: desc, - parent: parent, - index: index, - } - if parent == nil { - ed.path = fmt.Sprintf("%d,%d", enumPath, index) - } else { - ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) - } - return ed -} - -// Return a slice of all the EnumDescriptors defined within this file -func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { - sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) - // Top-level enums. - for i, enum := range file.EnumType { - sl = append(sl, newEnumDescriptor(enum, nil, file, i)) - } - // Enums within messages. Enums within embedded messages appear in the outer-most message. - for _, nested := range descs { - for i, enum := range nested.EnumType { - sl = append(sl, newEnumDescriptor(enum, nested, file, i)) - } - } - return sl -} - -// Return a slice of all the top-level ExtensionDescriptors defined within this file. -func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { - var sl []*ExtensionDescriptor - for _, field := range file.Extension { - sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) - } - return sl -} - -// Return a slice of all the types that are publicly imported into this file. -func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { - for _, index := range file.PublicDependency { - df := g.fileByName(file.Dependency[index]) - for _, d := range df.desc { - if d.GetOptions().GetMapEntry() { - continue - } - sl = append(sl, &ImportedDescriptor{common{file}, d}) - } - for _, e := range df.enum { - sl = append(sl, &ImportedDescriptor{common{file}, e}) - } - for _, ext := range df.ext { - sl = append(sl, &ImportedDescriptor{common{file}, ext}) - } - } - return -} - -func extractComments(file *FileDescriptor) { - file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) - for _, loc := range file.GetSourceCodeInfo().GetLocation() { - if loc.LeadingComments == nil { - continue - } - var p []string - for _, n := range loc.Path { - p = append(p, strconv.Itoa(int(n))) - } - file.comments[strings.Join(p, ",")] = loc - } -} - -// BuildTypeNameMap builds the map from fully qualified type names to objects. -// The key names for the map come from the input data, which puts a period at the beginning. 
-// It should be called after SetPackageNames and before GenerateAllFiles. -func (g *Generator) BuildTypeNameMap() { - g.typeNameToObject = make(map[string]Object) - for _, f := range g.allFiles { - // The names in this loop are defined by the proto world, not us, so the - // package name may be empty. If so, the dotted package name of X will - // be ".X"; otherwise it will be ".pkg.X". - dottedPkg := "." + f.GetPackage() - if dottedPkg != "." { - dottedPkg += "." - } - for _, enum := range f.enum { - name := dottedPkg + dottedSlice(enum.TypeName()) - g.typeNameToObject[name] = enum - } - for _, desc := range f.desc { - name := dottedPkg + dottedSlice(desc.TypeName()) - g.typeNameToObject[name] = desc - } - } -} - -// ObjectNamed, given a fully-qualified input type name as it appears in the input data, -// returns the descriptor for the message or enum with that name. -func (g *Generator) ObjectNamed(typeName string) Object { - o, ok := g.typeNameToObject[typeName] - if !ok { - g.Fail("can't find object with type", typeName) - } - return o -} - -// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. -type AnnotatedAtoms struct { - source string - path string - atoms []interface{} -} - -// Annotate records the file name and proto AST path of a list of atoms -// so that a later call to P can emit a link from each atom to its origin. -func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { - return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} -} - -// printAtom prints the (atomic, non-annotation) argument to the generated output. -func (g *Generator) printAtom(v interface{}) { - switch v := v.(type) { - case string: - g.WriteString(v) - case *string: - g.WriteString(*v) - case bool: - fmt.Fprint(g, v) - case *bool: - fmt.Fprint(g, *v) - case int: - fmt.Fprint(g, v) - case *int32: - fmt.Fprint(g, *v) - case *int64: - fmt.Fprint(g, *v) - case float64: - fmt.Fprint(g, v) - case *float64: - fmt.Fprint(g, *v) - case GoPackageName: - g.WriteString(string(v)) - case GoImportPath: - g.WriteString(strconv.Quote(string(v))) - default: - g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) - } -} - -// P prints the arguments to the generated output. It handles strings and int32s, plus -// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit -// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode -// is true). -func (g *Generator) P(str ...interface{}) { - if !g.writeOutput { - return - } - g.WriteString(g.indent) - for _, v := range str { - switch v := v.(type) { - case *AnnotatedAtoms: - begin := int32(g.Len()) - for _, v := range v.atoms { - g.printAtom(v) - } - if g.annotateCode { - end := int32(g.Len()) - var path []int32 - for _, token := range strings.Split(v.path, ",") { - val, err := strconv.ParseInt(token, 10, 32) - if err != nil { - g.Fail("could not parse proto AST path: ", err.Error()) - } - path = append(path, int32(val)) - } - g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ - Path: path, - SourceFile: &v.source, - Begin: &begin, - End: &end, - }) - } - default: - g.printAtom(v) - } - } - g.WriteByte('\n') -} - -// addInitf stores the given statement to be printed inside the file's init function. -// The statement is given as a format specifier and arguments. 
-func (g *Generator) addInitf(stmt string, a ...interface{}) { - g.init = append(g.init, fmt.Sprintf(stmt, a...)) -} - -// In Indents the output one tab stop. -func (g *Generator) In() { g.indent += "\t" } - -// Out unindents the output one tab stop. -func (g *Generator) Out() { - if len(g.indent) > 0 { - g.indent = g.indent[1:] - } -} - -// GenerateAllFiles generates the output for all the files we're outputting. -func (g *Generator) GenerateAllFiles() { - // Initialize the plugins - for _, p := range plugins { - p.Init(g) - } - // Generate the output. The generator runs for every file, even the files - // that we don't generate output for, so that we can collate the full list - // of exported symbols to support public imports. - genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) - for _, file := range g.genFiles { - genFileMap[file] = true - } - for _, file := range g.allFiles { - g.Reset() - g.annotations = nil - g.writeOutput = genFileMap[file] - g.generate(file) - if !g.writeOutput { - continue - } - fname := file.goFileName(g.pathType) - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(fname), - Content: proto.String(g.String()), - }) - if g.annotateCode { - // Store the generated code annotations in text, as the protoc plugin protocol requires that - // strings contain valid UTF-8. - g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(file.goFileName(g.pathType) + ".meta"), - Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), - }) - } - } -} - -// Run all the plugins associated with the file. -func (g *Generator) runPlugins(file *FileDescriptor) { - for _, p := range plugins { - p.Generate(file) - } -} - -// Fill the response protocol buffer with the generated output for all the files we're -// supposed to generate. -func (g *Generator) generate(file *FileDescriptor) { - g.file = file - g.usedPackages = make(map[GoImportPath]bool) - g.packageNames = make(map[GoImportPath]GoPackageName) - g.usedPackageNames = make(map[GoPackageName]bool) - g.addedImports = make(map[GoImportPath]bool) - for name := range globalPackageNames { - g.usedPackageNames[name] = true - } - - g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the proto package it is being compiled against.") - g.P("// A compilation error at this line likely means your copy of the") - g.P("// proto package needs to be updated.") - g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") - g.P() - - for _, td := range g.file.imp { - g.generateImported(td) - } - for _, enum := range g.file.enum { - g.generateEnum(enum) - } - for _, desc := range g.file.desc { - // Don't generate virtual messages for maps. - if desc.GetOptions().GetMapEntry() { - continue - } - g.generateMessage(desc) - } - for _, ext := range g.file.ext { - g.generateExtension(ext) - } - g.generateInitFunction() - g.generateFileDescriptor(file) - - // Run the plugins before the imports so we know which imports are necessary. - g.runPlugins(file) - - // Generate header and imports last, though they appear first in the output. - rem := g.Buffer - remAnno := g.annotations - g.Buffer = new(bytes.Buffer) - g.annotations = nil - g.generateHeader() - g.generateImports() - if !g.writeOutput { - return - } - // Adjust the offsets for annotations displaced by the header and imports. 
- for _, anno := range remAnno { - *anno.Begin += int32(g.Len()) - *anno.End += int32(g.Len()) - g.annotations = append(g.annotations, anno) - } - g.Write(rem.Bytes()) - - // Reformat generated code and patch annotation locations. - fset := token.NewFileSet() - original := g.Bytes() - if g.annotateCode { - // make a copy independent of g; we'll need it after Reset. - original = append([]byte(nil), original...) - } - fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments) - if err != nil { - // Print out the bad code with line numbers. - // This should never happen in practice, but it can while changing generated code, - // so consider this a debugging aid. - var src bytes.Buffer - s := bufio.NewScanner(bytes.NewReader(original)) - for line := 1; s.Scan(); line++ { - fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) - } - g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) - } - ast.SortImports(fset, fileAST) - g.Reset() - err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST) - if err != nil { - g.Fail("generated Go source code could not be reformatted:", err.Error()) - } - if g.annotateCode { - m, err := remap.Compute(original, g.Bytes()) - if err != nil { - g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) - } - for _, anno := range g.annotations { - new, ok := m.Find(int(*anno.Begin), int(*anno.End)) - if !ok { - g.Fail("span in formatted generated Go source code could not be mapped back to the original code") - } - *anno.Begin = int32(new.Pos) - *anno.End = int32(new.End) - } - } -} - -// Generate the header, including package definition -func (g *Generator) generateHeader() { - g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") - if g.file.GetOptions().GetDeprecated() { - g.P("// ", g.file.Name, " is a deprecated file.") - } else { - g.P("// source: ", g.file.Name) - } - g.P() - g.PrintComments(strconv.Itoa(packagePath)) - g.P() - g.P("package ", g.file.packageName) - g.P() -} - -// deprecationComment is the standard comment added to deprecated -// messages, fields, enums, and enum values. -var deprecationComment = "// Deprecated: Do not use." - -// PrintComments prints any comments from the source .proto file. -// The path is a comma-separated list of integers. -// It returns an indication of whether any comments were printed. -// See descriptor.proto for its format. -func (g *Generator) PrintComments(path string) bool { - if !g.writeOutput { - return false - } - if c, ok := g.makeComments(path); ok { - g.P(c) - return true - } - return false -} - -// makeComments generates the comment string for the field, no "\n" at the end -func (g *Generator) makeComments(path string) (string, bool) { - loc, ok := g.file.comments[path] - if !ok { - return "", false - } - w := new(bytes.Buffer) - nl := "" - for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") { - fmt.Fprintf(w, "%s//%s", nl, line) - nl = "\n" - } - return w.String(), true -} - -func (g *Generator) fileByName(filename string) *FileDescriptor { - return g.allFilesByName[filename] -} - -// weak returns whether the ith import of the current file is a weak import. 
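The reformatting step in generate above is plain standard-library Go; the sketch below runs the same parse, import-sort and pretty-print round trip (with the same printer.Config) over a toy buffer standing in for the generator's output:

    package main

    import (
        "bytes"
        "fmt"
        "go/ast"
        "go/parser"
        "go/printer"
        "go/token"
        "log"
    )

    func main() {
        // Unformatted source, standing in for the generator's output buffer.
        src := []byte("package demo\nimport (\"fmt\"\n\"sort\")\nfunc f(){sort.Strings(nil);fmt.Println( 1 )}\n")

        fset := token.NewFileSet()
        fileAST, err := parser.ParseFile(fset, "", src, parser.ParseComments)
        if err != nil {
            log.Fatal("bad Go source code was generated: ", err)
        }
        ast.SortImports(fset, fileAST)

        var out bytes.Buffer
        cfg := &printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}
        if err := cfg.Fprint(&out, fset, fileAST); err != nil {
            log.Fatal(err)
        }
        fmt.Print(out.String())
    }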
-func (g *Generator) weak(i int32) bool { - for _, j := range g.file.WeakDependency { - if j == i { - return true - } - } - return false -} - -// Generate the imports -func (g *Generator) generateImports() { - imports := make(map[GoImportPath]GoPackageName) - for i, s := range g.file.Dependency { - fd := g.fileByName(s) - importPath := fd.importPath - // Do not import our own package. - if importPath == g.file.importPath { - continue - } - // Do not import weak imports. - if g.weak(int32(i)) { - continue - } - // Do not import a package twice. - if _, ok := imports[importPath]; ok { - continue - } - // We need to import all the dependencies, even if we don't reference them, - // because other code and tools depend on having the full transitive closure - // of protocol buffer types in the binary. - packageName := g.GoPackageName(importPath) - if _, ok := g.usedPackages[importPath]; !ok { - packageName = "_" - } - imports[importPath] = packageName - } - for importPath := range g.addedImports { - imports[importPath] = g.GoPackageName(importPath) - } - // We almost always need a proto import. Rather than computing when we - // do, which is tricky when there's a plugin, just import it and - // reference it later. The same argument applies to the fmt and math packages. - g.P("import (") - g.P(g.Pkg["fmt"] + ` "fmt"`) - g.P(g.Pkg["math"] + ` "math"`) - g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") - for importPath, packageName := range imports { - g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath) - } - g.P(")") - g.P() - // TODO: may need to worry about uniqueness across plugins - for _, p := range plugins { - p.GenerateImports(g.file) - g.P() - } - g.P("// Reference imports to suppress errors if they are not otherwise used.") - g.P("var _ = ", g.Pkg["proto"], ".Marshal") - g.P("var _ = ", g.Pkg["fmt"], ".Errorf") - g.P("var _ = ", g.Pkg["math"], ".Inf") - g.P() -} - -func (g *Generator) generateImported(id *ImportedDescriptor) { - df := id.o.File() - filename := *df.Name - if df.importPath == g.file.importPath { - // Don't generate type aliases for files in the same Go package as this one. - return - } - if !supportTypeAliases { - g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) - } - g.usedPackages[df.importPath] = true - - for _, sym := range df.exported[id.o] { - sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath)) - } - - g.P() -} - -// Generate the enum definitions for this EnumDescriptor. -func (g *Generator) generateEnum(enum *EnumDescriptor) { - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. 
- ccTypeName := CamelCaseSlice(typeName) - ccPrefix := enum.prefix() - - deprecatedEnum := "" - if enum.GetOptions().GetDeprecated() { - deprecatedEnum = deprecationComment - } - g.PrintComments(enum.path) - g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) - g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) - g.P("const (") - for i, e := range enum.Value { - etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) - g.PrintComments(etorPath) - - deprecatedValue := "" - if e.GetOptions().GetDeprecated() { - deprecatedValue = deprecationComment - } - - name := ccPrefix + *e.Name - g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) - g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) - } - g.P(")") - g.P() - g.P("var ", ccTypeName, "_name = map[int32]string{") - generated := make(map[int32]bool) // avoid duplicate values - for _, e := range enum.Value { - duplicate := "" - if _, present := generated[*e.Number]; present { - duplicate = "// Duplicate value: " - } - g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") - generated[*e.Number] = true - } - g.P("}") - g.P() - g.P("var ", ccTypeName, "_value = map[string]int32{") - for _, e := range enum.Value { - g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") - } - g.P("}") - g.P() - - if !enum.proto3() { - g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") - g.P("p := new(", ccTypeName, ")") - g.P("*p = x") - g.P("return p") - g.P("}") - g.P() - } - - g.P("func (x ", ccTypeName, ") String() string {") - g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") - g.P("}") - g.P() - - if !enum.proto3() { - g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") - g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) - g.P("if err != nil {") - g.P("return err") - g.P("}") - g.P("*x = ", ccTypeName, "(value)") - g.P("return nil") - g.P("}") - g.P() - } - - var indexes []string - for m := enum.parent; m != nil; m = m.parent { - // XXX: skip groups? - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - indexes = append(indexes, strconv.Itoa(enum.index)) - g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - g.P() - if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { - g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) - g.P() - } - - g.generateEnumRegistration(enum) -} - -// The tag is a string like "varint,2,opt,name=fieldname,def=7" that -// identifies details of the field for the protocol buffer marshaling and unmarshaling -// code. The fields are: -// wire encoding -// protocol tag number -// opt,req,rep for optional, required, or repeated -// packed whether the encoding is "packed" (optional; repeated primitives only) -// name= the original declared name -// enum= the name of the enum type if it is an enum-typed field. -// proto3 if this field is in a proto3 message -// def= string representation of the default value, if any. -// The default value must be in a representation that can be used at run-time -// to generate the default value. Thus bools become 0 and 1, for instance. 
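The tag string documented above ends up, quoted, in the protobuf struct tag of the generated field. The hand-written stand-in below (message, field and enum names are invented) shows the shape for an optional proto2 enum field with a default, and splits the tag back into the pieces listed above:

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    // Corpus stands in for a generated proto2 enum type.
    type Corpus int32

    // SearchRequest is a hand-written stand-in for a generated message. The tag
    // follows the layout documented above: wire type, field number, opt/req/rep,
    // then name=, enum= and def= (an enum default is encoded as its integer value).
    type SearchRequest struct {
        Corpus *Corpus `protobuf:"varint,4,opt,name=corpus,enum=example.Corpus,def=2"`
    }

    func main() {
        f, _ := reflect.TypeOf(SearchRequest{}).FieldByName("Corpus")
        for i, part := range strings.Split(f.Tag.Get("protobuf"), ",") {
            fmt.Printf("piece %d: %s\n", i, part)
        }
    }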
-func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { - optrepreq := "" - switch { - case isOptional(field): - optrepreq = "opt" - case isRequired(field): - optrepreq = "req" - case isRepeated(field): - optrepreq = "rep" - } - var defaultValue string - if dv := field.DefaultValue; dv != nil { // set means an explicit default - defaultValue = *dv - // Some types need tweaking. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - if defaultValue == "true" { - defaultValue = "1" - } else { - defaultValue = "0" - } - case descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: - // Nothing to do. Quoting is done for the whole tag. - case descriptor.FieldDescriptorProto_TYPE_ENUM: - // For enums we need to provide the integer constant. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - // It is an enum that was publicly imported. - // We need the underlying type. - obj = id.o - } - enum, ok := obj.(*EnumDescriptor) - if !ok { - log.Printf("obj is a %T", obj) - if id, ok := obj.(*ImportedDescriptor); ok { - log.Printf("id.o is a %T", id.o) - } - g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) - } - defaultValue = enum.integerValueAsString(defaultValue) - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { - if f, err := strconv.ParseFloat(defaultValue, 32); err == nil { - defaultValue = fmt.Sprint(float32(f)) - } - } - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { - if f, err := strconv.ParseFloat(defaultValue, 64); err == nil { - defaultValue = fmt.Sprint(f) - } - } - } - defaultValue = ",def=" + defaultValue - } - enum := "" - if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { - // We avoid using obj.GoPackageName(), because we want to use the - // original (proto-world) package name. - obj := g.ObjectNamed(field.GetTypeName()) - if id, ok := obj.(*ImportedDescriptor); ok { - obj = id.o - } - enum = ",enum=" - if pkg := obj.File().GetPackage(); pkg != "" { - enum += pkg + "." - } - enum += CamelCaseSlice(obj.TypeName()) - } - packed := "" - if (field.Options != nil && field.Options.GetPacked()) || - // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: - // "In proto3, repeated fields of scalar numeric types use packed encoding by default." - (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && - isRepeated(field) && isScalar(field)) { - packed = ",packed" - } - fieldName := field.GetName() - name := fieldName - if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { - // We must use the type name for groups instead of - // the field name to preserve capitalization. - // type_name in FieldDescriptorProto is fully-qualified, - // but we only want the local part. - name = *field.TypeName - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[i+1:] - } - } - if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name { - // TODO: escaping might be needed, in which case - // perhaps this should be in its own "json" tag. 
- name += ",json=" + json - } - name = ",name=" + name - if message.proto3() { - name += ",proto3" - } - oneof := "" - if field.OneofIndex != nil { - oneof = ",oneof" - } - return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", - wiretype, - field.GetNumber(), - optrepreq, - packed, - name, - enum, - oneof, - defaultValue)) -} - -func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { - switch typ { - case descriptor.FieldDescriptorProto_TYPE_GROUP: - return false - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - return false - case descriptor.FieldDescriptorProto_TYPE_BYTES: - return false - } - return true -} - -// TypeName is the printed name appropriate for an item. If the object is in the current file, -// TypeName drops the package name and underscores the rest. -// Otherwise the object is from another package; and the result is the underscored -// package name followed by the item name. -// The result always has an initial capital. -func (g *Generator) TypeName(obj Object) string { - return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) -} - -// GoType returns a string representing the type name, and the wire type -func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { - // TODO: Options. - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - typ, wire = "float64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FLOAT: - typ, wire = "float32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_INT64: - typ, wire = "int64", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT64: - typ, wire = "uint64", "varint" - case descriptor.FieldDescriptorProto_TYPE_INT32: - typ, wire = "int32", "varint" - case descriptor.FieldDescriptorProto_TYPE_UINT32: - typ, wire = "uint32", "varint" - case descriptor.FieldDescriptorProto_TYPE_FIXED64: - typ, wire = "uint64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_FIXED32: - typ, wire = "uint32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_BOOL: - typ, wire = "bool", "varint" - case descriptor.FieldDescriptorProto_TYPE_STRING: - typ, wire = "string", "bytes" - case descriptor.FieldDescriptorProto_TYPE_GROUP: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "group" - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = "*"+g.TypeName(desc), "bytes" - case descriptor.FieldDescriptorProto_TYPE_BYTES: - typ, wire = "[]byte", "bytes" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - desc := g.ObjectNamed(field.GetTypeName()) - typ, wire = g.TypeName(desc), "varint" - case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - typ, wire = "int32", "fixed32" - case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - typ, wire = "int64", "fixed64" - case descriptor.FieldDescriptorProto_TYPE_SINT32: - typ, wire = "int32", "zigzag32" - case descriptor.FieldDescriptorProto_TYPE_SINT64: - typ, wire = "int64", "zigzag64" - default: - g.Fail("unknown type for", field.GetName()) - } - if isRepeated(field) { - typ = "[]" + typ - } else if message != nil && message.proto3() { - return - } else if field.OneofIndex != nil && message != nil { - return - } else if needsStar(*field.Type) { - typ = "*" + typ - } - return -} - -func (g *Generator) RecordTypeUse(t string) { - if _, ok := g.typeNameToObject[t]; !ok { - return - } - importPath := g.ObjectNamed(t).GoImportPath() - if importPath == g.outputImportPath { - // Don't record use of objects in our 
package. - return - } - g.AddImport(importPath) - g.usedPackages[importPath] = true -} - -// Method names that may be generated. Fields with these names get an -// underscore appended. Any change to this set is a potential incompatible -// API change because it changes generated field names. -var methodNames = [...]string{ - "Reset", - "String", - "ProtoMessage", - "Marshal", - "Unmarshal", - "ExtensionRangeArray", - "ExtensionMap", - "Descriptor", -} - -// Names of messages in the `google.protobuf` package for which -// we will generate XXX_WellKnownType methods. -var wellKnownTypes = map[string]bool{ - "Any": true, - "Duration": true, - "Empty": true, - "Struct": true, - "Timestamp": true, - - "Value": true, - "ListValue": true, - "DoubleValue": true, - "FloatValue": true, - "Int64Value": true, - "UInt64Value": true, - "Int32Value": true, - "UInt32Value": true, - "BoolValue": true, - "StringValue": true, - "BytesValue": true, -} - -// getterDefault finds the default value for the field to return from a getter, -// regardless of if it's a built in default or explicit from the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName" -func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string { - if isRepeated(field) { - return "nil" - } - if def := field.GetDefaultValue(); def != "" { - defaultConstant := g.defaultConstantName(goMessageType, field.GetName()) - if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES { - return defaultConstant - } - return "append([]byte(nil), " + defaultConstant + "...)" - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_BOOL: - return "false" - case descriptor.FieldDescriptorProto_TYPE_STRING: - return `""` - case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES: - return "nil" - case descriptor.FieldDescriptorProto_TYPE_ENUM: - obj := g.ObjectNamed(field.GetTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate getter for %s", field.GetName()) - return "nil" - } - if len(enum.Value) == 0 { - return "0 // empty enum" - } - first := enum.Value[0].GetName() - return g.DefaultPackageName(obj) + enum.prefix() + first - default: - return "0" - } -} - -// defaultConstantName builds the name of the default constant from the message -// type name and the untouched field name, e.g. "Default_MessageType_FieldName" -func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string { - return "Default_" + goMessageType + "_" + CamelCase(protoFieldName) -} - -// The different types of fields in a message and how to actually print them -// Most of the logic for generateMessage is in the methods of these types. -// -// Note that the content of the field is irrelevant, a simpleField can contain -// anything from a scalar to a group (which is just a message). -// -// Extension fields (and message sets) are however handled separately. -// -// simpleField - a field that is neiter weak nor oneof, possibly repeated -// oneofField - field containing list of subfields: -// - oneofSubField - a field within the oneof - -// msgCtx contains the context for the generator functions. -type msgCtx struct { - goName string // Go struct name of the message, e.g. 
MessageName - message *Descriptor // The descriptor for the message -} - -// fieldCommon contains data common to all types of fields. -type fieldCommon struct { - goName string // Go name of field, e.g. "FieldName" or "Descriptor_" - protoName string // Name of field in proto language, e.g. "field_name" or "descriptor" - getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_" - goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage" - tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"` - fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0" -} - -// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor". -func (f *fieldCommon) getProtoName() string { - return f.protoName -} - -// getGoType returns the go type of the field as a string, e.g. "*int32". -func (f *fieldCommon) getGoType() string { - return f.goType -} - -// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated. -type simpleField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use." - getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" - comment string // The full comment for the field, e.g. "// Useful information" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *simpleField) decl(g *Generator, mc *msgCtx) { - g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated) -} - -// getter prints the getter for the field. -func (f *simpleField) getter(g *Generator, mc *msgCtx) { - star := "" - tname := f.goType - if needsStar(f.protoType) && tname[0] == '*' { - tname = tname[1:] - star = "*" - } - if f.deprecated != "" { - g.P(f.deprecated) - } - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {") - if f.getterDef == "nil" { // Simpler getter - g.P("if m != nil {") - g.P("return m." + f.goName) - g.P("}") - g.P("return nil") - g.P("}") - g.P() - return - } - if mc.message.proto3() { - g.P("if m != nil {") - } else { - g.P("if m != nil && m." + f.goName + " != nil {") - } - g.P("return " + star + "m." + f.goName) - g.P("}") - g.P("return ", f.getterDef) - g.P("}") - g.P() -} - -// setter prints the setter method of the field. -func (f *simpleField) setter(g *Generator, mc *msgCtx) { - // No setter for regular fields yet -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *simpleField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *simpleField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofSubFields are kept slize held by each oneofField. 
They do not appear in the top level slize of fields for the message. -type oneofSubField struct { - fieldCommon - protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration" - protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 - oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName" - fieldNumber int // Actual field number, as defined in proto, e.g. 12 - getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName" - protoDef string // Default value as defined in the proto file, e.g "yoshi" or "5" - deprecated string // Deprecation comment, if any. -} - -// typedNil prints a nil casted to the pointer to this field. -// - for XXX_OneofWrappers -func (f *oneofSubField) typedNil(g *Generator) { - g.P("(*", f.oneofTypeName, ")(nil),") -} - -// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5". -func (f *oneofSubField) getProtoDef() string { - return f.protoDef -} - -// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration". -func (f *oneofSubField) getProtoTypeName() string { - return f.protoTypeName -} - -// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64. -func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type { - return f.protoType -} - -// oneofField represents the oneof on top level. -// The alternative fields within the oneof are represented by oneofSubField. -type oneofField struct { - fieldCommon - subFields []*oneofSubField // All the possible oneof fields - comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\" -} - -// decl prints the declaration of the field in the struct (if any). -func (f *oneofField) decl(g *Generator, mc *msgCtx) { - comment := f.comment - for _, sf := range f.subFields { - comment += "//\t*" + sf.oneofTypeName + "\n" - } - g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`") -} - -// getter for a oneof field will print additional discriminators and interfaces for the oneof, -// also it prints all the getters for the sub fields. 
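Taken together, oneofField.decl and the getter logic that follows emit a discriminator interface, one wrapper struct per alternative, and nil-safe getters. The following is a hand-written approximation of that shape; the Shape message, its oneof, and the field numbers are invented for illustration and are not taken from any real .proto file.

```go
package main

import "fmt"

// Shape approximates what the oneof code emits for a hypothetical
// `oneof dimension { double radius = 1; double side = 2; }`.
type Shape struct {
	// Types that are valid to be assigned to Dimension:
	//	*Shape_Radius
	//	*Shape_Side
	Dimension isShape_Dimension `protobuf_oneof:"dimension"`
}

// The discriminator interface, implemented by every wrapper type.
type isShape_Dimension interface {
	isShape_Dimension()
}

type Shape_Radius struct {
	Radius float64 `protobuf:"fixed64,1,opt,name=radius,proto3,oneof"`
}

type Shape_Side struct {
	Side float64 `protobuf:"fixed64,2,opt,name=side,proto3,oneof"`
}

func (*Shape_Radius) isShape_Dimension() {}
func (*Shape_Side) isShape_Dimension()   {}

// Getter for the oneof field itself.
func (m *Shape) GetDimension() isShape_Dimension {
	if m != nil {
		return m.Dimension
	}
	return nil
}

// Per-alternative getter, guarded by a type assertion on the wrapper.
func (m *Shape) GetRadius() float64 {
	if x, ok := m.GetDimension().(*Shape_Radius); ok {
		return x.Radius
	}
	return 0
}

func main() {
	s := &Shape{Dimension: &Shape_Radius{Radius: 2.5}}
	fmt.Println(s.GetRadius()) // 2.5
}
```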
-func (f *oneofField) getter(g *Generator, mc *msgCtx) { - // The discriminator type - g.P("type ", f.goType, " interface {") - g.P(f.goType, "()") - g.P("}") - g.P() - // The subField types, fulfilling the discriminator type contract - for _, sf := range f.subFields { - g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {") - g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`") - g.P("}") - g.P() - } - for _, sf := range f.subFields { - g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}") - g.P() - } - // Getter for the oneof field - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {") - g.P("if m != nil { return m.", f.goName, " }") - g.P("return nil") - g.P("}") - g.P() - // Getters for each oneof - for _, sf := range f.subFields { - if sf.deprecated != "" { - g.P(sf.deprecated) - } - g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {") - g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {") - g.P("return x.", sf.goName) - g.P("}") - g.P("return ", sf.getterDef) - g.P("}") - g.P() - } -} - -// setter prints the setter method of the field. -func (f *oneofField) setter(g *Generator, mc *msgCtx) { - // No setters for oneof yet -} - -// topLevelField interface implemented by all types of fields on the top level (not oneofSubField). -type topLevelField interface { - decl(g *Generator, mc *msgCtx) // print declaration within the struct - getter(g *Generator, mc *msgCtx) // print getter - setter(g *Generator, mc *msgCtx) // print setter if applicable -} - -// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField). -type defField interface { - getProtoDef() string // default value explicitly stated in the proto file, e.g "yoshi" or "5" - getProtoName() string // proto name of a field, e.g. "field_name" or "descriptor" - getGoType() string // go type of the field as a string, e.g. "*int32" - getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration" - getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64 -} - -// generateDefaultConstants adds constants for default values if needed, which is only if the default value is. -// explicit in the proto. -func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) { - // Collect fields that can have defaults - dFields := []defField{} - for _, pf := range topLevelFields { - if f, ok := pf.(*oneofField); ok { - for _, osf := range f.subFields { - dFields = append(dFields, osf) - } - continue - } - dFields = append(dFields, pf.(defField)) - } - for _, df := range dFields { - def := df.getProtoDef() - if def == "" { - continue - } - fieldname := g.defaultConstantName(mc.goName, df.getProtoName()) - typename := df.getGoType() - if typename[0] == '*' { - typename = typename[1:] - } - kind := "const " - switch { - case typename == "bool": - case typename == "string": - def = strconv.Quote(def) - case typename == "[]byte": - def = "[]byte(" + strconv.Quote(unescape(def)) + ")" - kind = "var " - case def == "inf", def == "-inf", def == "nan": - // These names are known to, and defined by, the protocol language. 
- switch def { - case "inf": - def = "math.Inf(1)" - case "-inf": - def = "math.Inf(-1)" - case "nan": - def = "math.NaN()" - } - if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT { - def = "float32(" + def + ")" - } - kind = "var " - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT: - if f, err := strconv.ParseFloat(def, 32); err == nil { - def = fmt.Sprint(float32(f)) - } - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE: - if f, err := strconv.ParseFloat(def, 64); err == nil { - def = fmt.Sprint(f) - } - case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM: - // Must be an enum. Need to construct the prefixed name. - obj := g.ObjectNamed(df.getProtoTypeName()) - var enum *EnumDescriptor - if id, ok := obj.(*ImportedDescriptor); ok { - // The enum type has been publicly imported. - enum, _ = id.o.(*EnumDescriptor) - } else { - enum, _ = obj.(*EnumDescriptor) - } - if enum == nil { - log.Printf("don't know how to generate constant for %s", fieldname) - continue - } - def = g.DefaultPackageName(obj) + enum.prefix() + def - } - g.P(kind, fieldname, " ", typename, " = ", def) - g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""}) - } - g.P() -} - -// generateInternalStructFields just adds the XXX_ fields to the message struct. -func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) { - g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals - if len(mc.message.ExtensionRange) > 0 { - messageset := "" - if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() { - messageset = "protobuf_messageset:\"1\" " - } - g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") - } - g.P("XXX_unrecognized\t[]byte `json:\"-\"`") - g.P("XXX_sizecache\tint32 `json:\"-\"`") - -} - -// generateOneofFuncs adds all the utility functions for oneof, including marshalling, unmarshalling and sizer. -func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) { - ofields := []*oneofField{} - for _, f := range topLevelFields { - if o, ok := f.(*oneofField); ok { - ofields = append(ofields, o) - } - } - if len(ofields) == 0 { - return - } - - // OneofFuncs - g.P("// XXX_OneofWrappers is for the internal use of the proto package.") - g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {") - g.P("return []interface{}{") - for _, of := range ofields { - for _, sf := range of.subFields { - sf.typedNil(g) - } - } - g.P("}") - g.P("}") - g.P() -} - -// generateMessageStruct adds the actual struct with it's members (but not methods) to the output. -func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) { - comments := g.PrintComments(mc.message.path) - - // Guarantee deprecation comments appear after user-provided comments. - if mc.message.GetOptions().GetDeprecated() { - if comments { - // Convention: Separate deprecation comments from original - // comments with an empty line. - g.P("//") - } - g.P(deprecationComment) - } - - g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {") - for _, pf := range topLevelFields { - pf.decl(g, mc) - } - g.generateInternalStructFields(mc, topLevelFields) - g.P("}") -} - -// generateGetters adds getters for all fields, including oneofs and weak fields when applicable. 
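generateMessageStruct, generateInternalStructFields, and generateDefaultConstants combine to produce a struct with the XXX_ bookkeeping fields plus a Default_&lt;Message&gt;_&lt;Field&gt; constant whenever the .proto file states an explicit default. Below is a hand-written approximation for a hypothetical proto2 field; the Server message, field number, and default value are invented for illustration.

```go
package main

import "fmt"

// Server approximates the output for a hypothetical proto2 field
// `optional int32 port = 1 [default = 8080];`.
type Server struct {
	Port                 *int32   `protobuf:"varint,1,opt,name=port,def=8080" json:"port,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Default_Server_Port follows the defaultConstantName pattern
// "Default_<MessageType>_<FieldName>".
const Default_Server_Port int32 = 8080

// GetPort falls back to the default constant when the field is unset,
// matching the getterDefault behavior described earlier.
func (m *Server) GetPort() int32 {
	if m != nil && m.Port != nil {
		return *m.Port
	}
	return Default_Server_Port
}

func main() {
	fmt.Println((&Server{}).GetPort()) // 8080
}
```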
-func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.getter(g, mc) - } -} - -// generateSetters add setters for all fields, including oneofs and weak fields when applicable. -func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) { - for _, pf := range topLevelFields { - pf.setter(g, mc) - } -} - -// generateCommonMethods adds methods to the message that are not on a per field basis. -func (g *Generator) generateCommonMethods(mc *msgCtx) { - // Reset, String and ProtoMessage methods. - g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }") - g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") - g.P("func (*", mc.goName, ") ProtoMessage() {}") - var indexes []string - for m := mc.message; m != nil; m = m.parent { - indexes = append([]string{strconv.Itoa(m.index)}, indexes...) - } - g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {") - g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") - g.P("}") - g.P() - // TODO: Revisit the decision to use a XXX_WellKnownType method - // if we change proto.MessageName to work with multiple equivalents. - if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] { - g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`) - g.P() - } - - // Extension support methods - if len(mc.message.ExtensionRange) > 0 { - g.P() - g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{") - for _, r := range mc.message.ExtensionRange { - end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends - g.P("{Start: ", r.Start, ", End: ", end, "},") - } - g.P("}") - g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") - g.P("return extRange_", mc.goName) - g.P("}") - g.P() - } - - // TODO: It does not scale to keep adding another method for every - // operation on protos that we want to switch over to using the - // table-driven approach. Instead, we should only add a single method - // that allows getting access to the *InternalMessageInfo struct and then - // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. - - // Wrapper for table-driven marshaling and unmarshaling. - g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {") - g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") - g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") - g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message - g.P("return xxx_messageInfo_", mc.goName, ".Size(m)") - g.P("}") - - g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {") - g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)") - g.P("}") - - g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo") - g.P() -} - -// Generate the type, methods and default constant definitions for this Descriptor. 
-func (g *Generator) generateMessage(message *Descriptor) { - topLevelFields := []topLevelField{} - oFields := make(map[int32]*oneofField) - // The full type name - typeName := message.TypeName() - // The full type name, CamelCased. - goTypeName := CamelCaseSlice(typeName) - - usedNames := make(map[string]bool) - for _, n := range methodNames { - usedNames[n] = true - } - - // allocNames finds a conflict-free variation of the given strings, - // consistently mutating their suffixes. - // It returns the same number of strings. - allocNames := func(ns ...string) []string { - Loop: - for { - for _, n := range ns { - if usedNames[n] { - for i := range ns { - ns[i] += "_" - } - continue Loop - } - } - for _, n := range ns { - usedNames[n] = true - } - return ns - } - } - - mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later - - // Build a structure more suitable for generating the text in one pass - for i, field := range message.Field { - // Allocate the getter and the field at the same time so name - // collisions create field/method consistent names. - // TODO: This allocation occurs based on the order of the fields - // in the proto file, meaning that a change in the field - // ordering can change generated Method/Field names. - base := CamelCase(*field.Name) - ns := allocNames(base, "Get"+base) - fieldName, fieldGetterName := ns[0], ns[1] - typename, wiretype := g.GoType(message, field) - jsonName := *field.Name - tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") - - oneof := field.OneofIndex != nil - if oneof && oFields[*field.OneofIndex] == nil { - odp := message.OneofDecl[int(*field.OneofIndex)] - base := CamelCase(odp.GetName()) - fname := allocNames(base)[0] - - // This is the first field of a oneof we haven't seen before. - // Generate the union field. - oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) - c, ok := g.makeComments(oneofFullPath) - if ok { - c += "\n//\n" - } - c += "// Types that are valid to be assigned to " + fname + ":\n" - // Generate the rest of this comment later, - // when we've computed any disambiguation. - - dname := "is" + goTypeName + "_" + fname - tag := `protobuf_oneof:"` + odp.GetName() + `"` - of := oneofField{ - fieldCommon: fieldCommon{ - goName: fname, - getterName: "Get"+fname, - goType: dname, - tags: tag, - protoName: odp.GetName(), - fullPath: oneofFullPath, - }, - comment: c, - } - topLevelFields = append(topLevelFields, &of) - oFields[*field.OneofIndex] = &of - } - - if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { - desc := g.ObjectNamed(field.GetTypeName()) - if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { - // Figure out the Go types and tags for the key and value types. - keyField, valField := d.Field[0], d.Field[1] - keyType, keyWire := g.GoType(d, keyField) - valType, valWire := g.GoType(d, valField) - keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) - - // We don't use stars, except for message-typed values. - // Message and enum types are the only two possibly foreign types used in maps, - // so record their use. They are not permitted as map keys. 
- keyType = strings.TrimPrefix(keyType, "*") - switch *valField.Type { - case descriptor.FieldDescriptorProto_TYPE_ENUM: - valType = strings.TrimPrefix(valType, "*") - g.RecordTypeUse(valField.GetTypeName()) - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.RecordTypeUse(valField.GetTypeName()) - default: - valType = strings.TrimPrefix(valType, "*") - } - - typename = fmt.Sprintf("map[%s]%s", keyType, valType) - mapFieldTypes[field] = typename // record for the getter generation - - tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) - } - } - - fieldDeprecated := "" - if field.GetOptions().GetDeprecated() { - fieldDeprecated = deprecationComment - } - - dvalue := g.getterDefault(field, goTypeName) - if oneof { - tname := goTypeName + "_" + fieldName - // It is possible for this to collide with a message or enum - // nested in this message. Check for collisions. - for { - ok := true - for _, desc := range message.nested { - if CamelCaseSlice(desc.TypeName()) == tname { - ok = false - break - } - } - for _, enum := range message.enums { - if CamelCaseSlice(enum.TypeName()) == tname { - ok = false - break - } - } - if !ok { - tname += "_" - continue - } - break - } - - oneofField := oFields[*field.OneofIndex] - tag := "protobuf:" + g.goTag(message, field, wiretype) - sf := oneofSubField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), - }, - protoTypeName: field.GetTypeName(), - fieldNumber: int(*field.Number), - protoType: *field.Type, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - oneofTypeName: tname, - deprecated: fieldDeprecated, - } - oneofField.subFields = append(oneofField.subFields, &sf) - g.RecordTypeUse(field.GetTypeName()) - continue - } - - fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) - c, ok := g.makeComments(fieldFullPath) - if ok { - c += "\n" - } - rf := simpleField{ - fieldCommon: fieldCommon{ - goName: fieldName, - getterName: fieldGetterName, - goType: typename, - tags: tag, - protoName: field.GetName(), - fullPath: fieldFullPath, - }, - protoTypeName: field.GetTypeName(), - protoType: *field.Type, - deprecated: fieldDeprecated, - getterDef: dvalue, - protoDef: field.GetDefaultValue(), - comment: c, - } - var pf topLevelField = &rf - - topLevelFields = append(topLevelFields, pf) - g.RecordTypeUse(field.GetTypeName()) - } - - mc := &msgCtx{ - goName: goTypeName, - message: message, - } - - g.generateMessageStruct(mc, topLevelFields) - g.P() - g.generateCommonMethods(mc) - g.P() - g.generateDefaultConstants(mc, topLevelFields) - g.P() - g.generateGetters(mc, topLevelFields) - g.P() - g.generateSetters(mc, topLevelFields) - g.P() - g.generateOneofFuncs(mc, topLevelFields) - g.P() - - var oneofTypes []string - for _, f := range topLevelFields { - if of, ok := f.(*oneofField); ok { - for _, osf := range of.subFields { - oneofTypes = append(oneofTypes, osf.oneofTypeName) - } - } - } - - opts := message.Options - ms := &messageSymbol{ - sym: goTypeName, - hasExtensions: len(message.ExtensionRange) > 0, - isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), - oneofTypes: oneofTypes, - } - g.file.addExport(message, ms) - - for _, ext := range message.ext { - g.generateExtension(ext) - } - - fullName := strings.Join(message.TypeName(), ".") - if g.file.Package != nil { - fullName = *g.file.Package + "." 
+ fullName - } - - g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName) - // Register types for native map types. - for _, k := range mapFieldKeys(mapFieldTypes) { - fullName := strings.TrimPrefix(*k.TypeName, ".") - g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) - } - -} - -type byTypeName []*descriptor.FieldDescriptorProto - -func (a byTypeName) Len() int { return len(a) } -func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } - -// mapFieldKeys returns the keys of m in a consistent order. -func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { - keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Sort(byTypeName(keys)) - return keys -} - -var escapeChars = [256]byte{ - 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', -} - -// unescape reverses the "C" escaping that protoc does for default values of bytes fields. -// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape -// sequences are conveyed, unmodified, into the decoded result. -func unescape(s string) string { - // NB: Sadly, we can't use strconv.Unquote because protoc will escape both - // single and double quotes, but strconv.Unquote only allows one or the - // other (based on actual surrounding quotes of its input argument). - - var out []byte - for len(s) > 0 { - // regular character, or too short to be valid escape - if s[0] != '\\' || len(s) < 2 { - out = append(out, s[0]) - s = s[1:] - } else if c := escapeChars[s[1]]; c != 0 { - // escape sequence - out = append(out, c) - s = s[2:] - } else if s[1] == 'x' || s[1] == 'X' { - // hex escape, e.g. "\x80 - if len(s) < 4 { - // too short to be valid - out = append(out, s[:2]...) - s = s[2:] - continue - } - v, err := strconv.ParseUint(s[2:4], 16, 8) - if err != nil { - out = append(out, s[:4]...) - } else { - out = append(out, byte(v)) - } - s = s[4:] - } else if '0' <= s[1] && s[1] <= '7' { - // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" - // so consume up to 2 more bytes or up to end-of-string - n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) - if n > 3 { - n = 3 - } - v, err := strconv.ParseUint(s[1:1+n], 8, 8) - if err != nil { - out = append(out, s[:1+n]...) - } else { - out = append(out, byte(v)) - } - s = s[1+n:] - } else { - // bad escape, just propagate the slash as-is - out = append(out, s[0]) - s = s[1:] - } - } - - return string(out) -} - -func (g *Generator) generateExtension(ext *ExtensionDescriptor) { - ccTypeName := ext.DescName() - - extObj := g.ObjectNamed(*ext.Extendee) - var extDesc *Descriptor - if id, ok := extObj.(*ImportedDescriptor); ok { - // This is extending a publicly imported message. - // We need the underlying type for goTag. 
- extDesc = id.o.(*Descriptor) - } else { - extDesc = extObj.(*Descriptor) - } - extendedType := "*" + g.TypeName(extObj) // always use the original - field := ext.FieldDescriptorProto - fieldType, wireType := g.GoType(ext.parent, field) - tag := g.goTag(extDesc, field, wireType) - g.RecordTypeUse(*ext.Extendee) - if n := ext.FieldDescriptorProto.TypeName; n != nil { - // foreign extension type - g.RecordTypeUse(*n) - } - - typeName := ext.TypeName() - - // Special case for proto2 message sets: If this extension is extending - // proto2.bridge.MessageSet, and its final name component is "message_set_extension", - // then drop that last component. - // - // TODO: This should be implemented in the text formatter rather than the generator. - // In addition, the situation for when to apply this special case is implemented - // differently in other languages: - // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 - if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { - typeName = typeName[:len(typeName)-1] - } - - // For text formatting, the package must be exactly what the .proto file declares, - // ignoring overrides such as the go_package option, and with no dot/underscore mapping. - extName := strings.Join(typeName, ".") - if g.file.Package != nil { - extName = *g.file.Package + "." + extName - } - - g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") - g.P("ExtendedType: (", extendedType, ")(nil),") - g.P("ExtensionType: (", fieldType, ")(nil),") - g.P("Field: ", field.Number, ",") - g.P(`Name: "`, extName, `",`) - g.P("Tag: ", tag, ",") - g.P(`Filename: "`, g.file.GetName(), `",`) - - g.P("}") - g.P() - - g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) - - g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) -} - -func (g *Generator) generateInitFunction() { - if len(g.init) == 0 { - return - } - g.P("func init() {") - for _, l := range g.init { - g.P(l) - } - g.P("}") - g.init = nil -} - -func (g *Generator) generateFileDescriptor(file *FileDescriptor) { - // Make a copy and trim source_code_info data. - // TODO: Trim this more when we know exactly what we need. - pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) - pb.SourceCodeInfo = nil - - b, err := proto.Marshal(pb) - if err != nil { - g.Fail(err.Error()) - } - - var buf bytes.Buffer - w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) - w.Write(b) - w.Close() - b = buf.Bytes() - - v := file.VarName() - g.P() - g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") - g.P("var ", v, " = []byte{") - g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") - for len(b) > 0 { - n := 16 - if n > len(b) { - n = len(b) - } - - s := "" - for _, c := range b[:n] { - s += fmt.Sprintf("0x%02x,", c) - } - g.P(s) - - b = b[n:] - } - g.P("}") -} - -func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { - // // We always print the full (proto-world) package name here. - pkg := enum.File().GetPackage() - if pkg != "" { - pkg += "." - } - // The full type name - typeName := enum.TypeName() - // The full type name, CamelCased. - ccTypeName := CamelCaseSlice(typeName) - g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) -} - -// And now lots of helper functions. - -// Is c an ASCII lower-case letter? 
-func isASCIILower(c byte) bool { - return 'a' <= c && c <= 'z' -} - -// Is c an ASCII digit? -func isASCIIDigit(c byte) bool { - return '0' <= c && c <= '9' -} - -// CamelCase returns the CamelCased name. -// If there is an interior underscore followed by a lower case letter, -// drop the underscore and convert the letter to upper case. -// There is a remote possibility of this rewrite causing a name collision, -// but it's so remote we're prepared to pretend it's nonexistent - since the -// C++ generator lowercases names, it's extremely unlikely to have two fields -// with different capitalizations. -// In short, _my_field_name_2 becomes XMyFieldName_2. -func CamelCase(s string) string { - if s == "" { - return "" - } - t := make([]byte, 0, 32) - i := 0 - if s[0] == '_' { - // Need a capital letter; drop the '_'. - t = append(t, 'X') - i++ - } - // Invariant: if the next letter is lower case, it must be converted - // to upper case. - // That is, we process a word at a time, where words are marked by _ or - // upper case letter. Digits are treated as words. - for ; i < len(s); i++ { - c := s[i] - if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { - continue // Skip the underscore in s. - } - if isASCIIDigit(c) { - t = append(t, c) - continue - } - // Assume we have a letter now - if not, it's a bogus identifier. - // The next word is a sequence of characters that must start upper case. - if isASCIILower(c) { - c ^= ' ' // Make it a capital letter. - } - t = append(t, c) // Guaranteed not lower case. - // Accept lower case sequence that follows. - for i+1 < len(s) && isASCIILower(s[i+1]) { - i++ - t = append(t, s[i]) - } - } - return string(t) -} - -// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to -// be joined with "_". -func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } - -// dottedSlice turns a sliced name into a dotted name. -func dottedSlice(elem []string) string { return strings.Join(elem, ".") } - -// Is this field optional? -func isOptional(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL -} - -// Is this field required? -func isRequired(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED -} - -// Is this field repeated? -func isRepeated(field *descriptor.FieldDescriptorProto) bool { - return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED -} - -// Is this field a scalar numeric type? 
-func isScalar(field *descriptor.FieldDescriptorProto) bool { - if field.Type == nil { - return false - } - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_DOUBLE, - descriptor.FieldDescriptorProto_TYPE_FLOAT, - descriptor.FieldDescriptorProto_TYPE_INT64, - descriptor.FieldDescriptorProto_TYPE_UINT64, - descriptor.FieldDescriptorProto_TYPE_INT32, - descriptor.FieldDescriptorProto_TYPE_FIXED64, - descriptor.FieldDescriptorProto_TYPE_FIXED32, - descriptor.FieldDescriptorProto_TYPE_BOOL, - descriptor.FieldDescriptorProto_TYPE_UINT32, - descriptor.FieldDescriptorProto_TYPE_ENUM, - descriptor.FieldDescriptorProto_TYPE_SFIXED32, - descriptor.FieldDescriptorProto_TYPE_SFIXED64, - descriptor.FieldDescriptorProto_TYPE_SINT32, - descriptor.FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} - -// badToUnderscore is the mapping function used to generate Go names from package names, -// which can be dotted in the input .proto file. It replaces non-identifier characters such as -// dot or dash with underscore. -func badToUnderscore(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { - return r - } - return '_' -} - -// baseName returns the last path element of the name, with the last dotted suffix removed. -func baseName(name string) string { - // First, find the last element - if i := strings.LastIndex(name, "/"); i >= 0 { - name = name[i+1:] - } - // Now drop the suffix - if i := strings.LastIndex(name, "."); i >= 0 { - name = name[0:i] - } - return name -} - -// The SourceCodeInfo message describes the location of elements of a parsed -// .proto file by way of a "path", which is a sequence of integers that -// describe the route from a FileDescriptorProto to the relevant submessage. -// The path alternates between a field number of a repeated field, and an index -// into that repeated field. The constants below define the field numbers that -// are used. -// -// See descriptor.proto for more information about this. -const ( - // tag numbers in FileDescriptorProto - packagePath = 2 // package - messagePath = 4 // message_type - enumPath = 5 // enum_type - // tag numbers in DescriptorProto - messageFieldPath = 2 // field - messageMessagePath = 3 // nested_type - messageEnumPath = 4 // enum_type - messageOneofPath = 8 // oneof_decl - // tag numbers in EnumDescriptorProto - enumValuePath = 2 // value -) - -var supportTypeAliases bool - -func init() { - for _, tag := range build.Default.ReleaseTags { - if tag == "go1.9" { - supportTypeAliases = true - return - } - } -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go deleted file mode 100644 index a9b6103..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go +++ /dev/null @@ -1,117 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package remap handles tracking the locations of Go tokens in a source text -across a rewrite by the Go formatter. -*/ -package remap - -import ( - "fmt" - "go/scanner" - "go/token" -) - -// A Location represents a span of byte offsets in the source text. -type Location struct { - Pos, End int // End is exclusive -} - -// A Map represents a mapping between token locations in an input source text -// and locations in the correspnding output text. -type Map map[Location]Location - -// Find reports whether the specified span is recorded by m, and if so returns -// the new location it was mapped to. If the input span was not found, the -// returned location is the same as the input. -func (m Map) Find(pos, end int) (Location, bool) { - key := Location{ - Pos: pos, - End: end, - } - if loc, ok := m[key]; ok { - return loc, true - } - return key, false -} - -func (m Map) add(opos, oend, npos, nend int) { - m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} -} - -// Compute constructs a location mapping from input to output. An error is -// reported if any of the tokens of output cannot be mapped. -func Compute(input, output []byte) (Map, error) { - itok := tokenize(input) - otok := tokenize(output) - if len(itok) != len(otok) { - return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) - } - m := make(Map) - for i, ti := range itok { - to := otok[i] - if ti.Token != to.Token { - return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) - } - m.add(ti.pos, ti.end, to.pos, to.end) - } - return m, nil -} - -// tokinfo records the span and type of a source token. 
-type tokinfo struct { - pos, end int - token.Token -} - -func tokenize(src []byte) []tokinfo { - fs := token.NewFileSet() - var s scanner.Scanner - s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) - var info []tokinfo - for { - pos, next, lit := s.Scan() - switch next { - case token.SEMICOLON: - continue - } - info = append(info, tokinfo{ - pos: int(pos - 1), - end: int(pos + token.Pos(len(lit)) - 1), - Token: next, - }) - if next == token.EOF { - break - } - } - return info -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go deleted file mode 100644 index 61bfc10..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ /dev/null @@ -1,369 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/compiler/plugin.proto - -/* -Package plugin_go is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/compiler/plugin.proto - -It has these top-level messages: - Version - CodeGeneratorRequest - CodeGeneratorResponse -*/ -package plugin_go - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The version number of protocol compiler. -type Version struct { - Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. 
- Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (m *Version) Unmarshal(b []byte) error { - return xxx_messageInfo_Version.Unmarshal(m, b) -} -func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Version.Marshal(b, m, deterministic) -} -func (dst *Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_Version.Merge(dst, src) -} -func (m *Version) XXX_Size() int { - return xxx_messageInfo_Version.Size(m) -} -func (m *Version) XXX_DiscardUnknown() { - xxx_messageInfo_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_Version proto.InternalMessageInfo - -func (m *Version) GetMajor() int32 { - if m != nil && m.Major != nil { - return *m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil && m.Minor != nil { - return *m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil && m.Patch != nil { - return *m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil && m.Suffix != nil { - return *m.Suffix - } - return "" -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -type CodeGeneratorRequest struct { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` - // The generator parameter passed on the command-line. - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` - // The version number of protocol compiler. 
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } -func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorRequest) ProtoMessage() {} -func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) -} -func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) -} -func (m *CodeGeneratorRequest) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorRequest.Size(m) -} -func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo - -func (m *CodeGeneratorRequest) GetFileToGenerate() []string { - if m != nil { - return m.FileToGenerate - } - return nil -} - -func (m *CodeGeneratorRequest) GetParameter() string { - if m != nil && m.Parameter != nil { - return *m.Parameter - } - return "" -} - -func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { - if m != nil { - return m.ProtoFile - } - return nil -} - -func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -type CodeGeneratorResponse struct { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. 
- Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } -func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse) ProtoMessage() {} -func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) -} -func (m *CodeGeneratorResponse) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse.Size(m) -} -func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo - -func (m *CodeGeneratorResponse) GetError() string { - if m != nil && m.Error != nil { - return *m.Error - } - return "" -} - -func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { - if m != nil { - return m.File - } - return nil -} - -// Represents a single generated file. -type CodeGeneratorResponse_File struct { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. 
- // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. - // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` - // The file contents. - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } -func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} -func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } -func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { - return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) -} -func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) -} -func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { - xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) -} -func (m *CodeGeneratorResponse_File) XXX_Size() int { - return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) -} -func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { - xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) -} - -var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo - -func (m *CodeGeneratorResponse_File) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { - if m != nil && m.InsertionPoint != nil { - return *m.InsertionPoint - } - return "" -} - -func (m *CodeGeneratorResponse_File) GetContent() string { - if m != nil && m.Content != nil { - return *m.Content - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") - proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") - proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") - proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") -} - -func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", 
fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, - 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, - 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, - 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, - 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, - 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, - 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, - 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, - 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, - 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, - 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, - 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, - 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, - 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, - 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, - 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, - 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, - 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, - 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, - 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, - 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, - 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, - 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, - 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, - 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, - 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, - 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden deleted file mode 100644 index 8953d0f..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by protoc-gen-go. -// source: google/protobuf/compiler/plugin.proto -// DO NOT EDIT! - -package google_protobuf_compiler - -import proto "github.com/golang/protobuf/proto" -import "math" -import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" - -// Reference proto and math imports to suppress error if they are not otherwise used. 
-var _ = proto.GetString -var _ = math.Inf - -type CodeGeneratorRequest struct { - FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` - Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } -func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorRequest) ProtoMessage() {} - -func (this *CodeGeneratorRequest) GetParameter() string { - if this != nil && this.Parameter != nil { - return *this.Parameter - } - return "" -} - -type CodeGeneratorResponse struct { - Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } -func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse) ProtoMessage() {} - -func (this *CodeGeneratorResponse) GetError() string { - if this != nil && this.Error != nil { - return *this.Error - } - return "" -} - -type CodeGeneratorResponse_File struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } -func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } -func (*CodeGeneratorResponse_File) ProtoMessage() {} - -func (this *CodeGeneratorResponse_File) GetName() string { - if this != nil && this.Name != nil { - return *this.Name - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { - if this != nil && this.InsertionPoint != nil { - return *this.InsertionPoint - } - return "" -} - -func (this *CodeGeneratorResponse_File) GetContent() string { - if this != nil && this.Content != nil { - return *this.Content - } - return "" -} - -func init() { -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto deleted file mode 100644 index 5b55745..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto +++ /dev/null @@ -1,167 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// -// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to -// change. -// -// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is -// just a program that reads a CodeGeneratorRequest from stdin and writes a -// CodeGeneratorResponse to stdout. -// -// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead -// of dealing with the raw protocol defined here. -// -// A plugin executable needs only to be placed somewhere in the path. The -// plugin should be named "protoc-gen-$NAME", and will then be used when the -// flag "--${NAME}_out" is passed to protoc. - -syntax = "proto2"; -package google.protobuf.compiler; -option java_package = "com.google.protobuf.compiler"; -option java_outer_classname = "PluginProtos"; - -option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; - -import "google/protobuf/descriptor.proto"; - -// The version number of protocol compiler. -message Version { - optional int32 major = 1; - optional int32 minor = 2; - optional int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - optional string suffix = 4; -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -message CodeGeneratorRequest { - // The .proto files that were explicitly listed on the command-line. The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - repeated string file_to_generate = 1; - - // The generator parameter passed on the command-line. - optional string parameter = 2; - - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. 
- repeated FileDescriptorProto proto_file = 15; - - // The version number of protocol compiler. - optional Version compiler_version = 3; - -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -message CodeGeneratorResponse { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. - optional string error = 1; - - // Represents a single generated file. - message File { - // The file name, relative to the output directory. The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - optional string name = 1; - - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. - // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. 
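The comments above describe the protoc plugin contract: protoc serializes a CodeGeneratorRequest to the plugin's stdin and expects a serialized CodeGeneratorResponse on stdout. A minimal sketch of such a plugin, written against the legacy github.com/golang/protobuf packages vendored in this tree (the output file name and content below are invented purely for illustration), might look like:

package main

import (
	"io/ioutil"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// protoc writes a serialized CodeGeneratorRequest to the plugin's stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		os.Exit(1)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		os.Exit(1)
	}

	// Emit one trivial file per requested .proto; a real generator would
	// walk req.ProtoFile, which carries the full FileDescriptorProtos.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.FileToGenerate {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".example.txt"),
			Content: proto.String("generated for " + name + "\n"),
		})
	}

	// The response goes back to protoc on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		os.Exit(1)
	}
	os.Stdout.Write(out)
}

Installed on PATH as protoc-gen-example, a plugin like this would be invoked by protoc through --example_out=.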
- // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - optional string insertion_point = 2; - - // The file contents. - optional string content = 15; - } - repeated File file = 15; -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index 70276e8..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,141 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - -import ( - "fmt" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" -) - -const googleApis = "type.googleapis.com/" - -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return any.TypeUrl[slash+1:], nil -} - -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. 
-func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) - if err != nil { - return nil, err - } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message -} - -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) - if err != nil { - return nil, err - } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) - } - return reflect.New(t.Elem()).Interface().(proto.Message), nil -} - -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. -// -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { - var err error - d.Message, err = Empty(any) - if err != nil { - return err - } - } - return UnmarshalAny(any, d.Message) - } - - aname, err := AnyMessageName(any) - if err != nil { - return err - } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) - } - return proto.Unmarshal(any.Value, pb) -} - -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { - return false - } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 78ee523..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -package any - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. 
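The ptypes helpers removed above (MarshalAny, UnmarshalAny, Is, DynamicAny) pack and unpack arbitrary messages through google.protobuf.Any. A minimal round-trip sketch, assuming the legacy github.com/golang/protobuf import paths used by this vendor tree and an arbitrary Duration payload:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message; the type URL becomes
	// "type.googleapis.com/google.protobuf.Duration".
	msg := &durpb.Duration{Seconds: 3, Nanos: 500000000}
	any, err := ptypes.MarshalAny(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(any.TypeUrl)

	// Check the packed type before unpacking it again.
	out := &durpb.Duration{}
	if ptypes.Is(any, out) {
		if err := ptypes.UnmarshalAny(any, out); err != nil {
			panic(err)
		}
	}
	fmt.Println(out.Seconds, out.Nanos) // 3 500000000
}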
-// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. 
- // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} -} - -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) -} - -var xxx_messageInfo_Any proto.InternalMessageInfo - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } - -var fileDescriptor_b53526c13ae22eb4 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index 4932942..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,154 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/doc.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index c0d595d..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package ptypes contains code for interacting with well-known types. -*/ -package ptypes diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 26d1ca2..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" - - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. 
A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index 0d681ee..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -package duration - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. 
-// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
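Alongside the generated Duration message, the ptypes package removed above converts between durpb.Duration and Go's time.Duration, enforcing the seconds/nanos bounds documented here. A short sketch of that round trip, with arbitrarily chosen values:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> google.protobuf.Duration.
	pb := ptypes.DurationProto(90*time.Second + 250*time.Millisecond)
	fmt.Println(pb.Seconds, pb.Nanos) // 90 250000000

	// ...and back, with range validation applied by ptypes.Duration.
	d, err := ptypes.Duration(pb)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1m30.25s
}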
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} -} - -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) -} - -var xxx_messageInfo_Duration proto.InternalMessageInfo - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } - -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce4..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. 
-// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go deleted file mode 100644 index 33daa73..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ /dev/null @@ -1,336 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/struct.proto - -package structpb - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -type NullValue int32 - -const ( - // Null value. - NullValue_NULL_VALUE NullValue = 0 -) - -var NullValue_name = map[int32]string{ - 0: "NULL_VALUE", -} - -var NullValue_value = map[string]int32{ - "NULL_VALUE": 0, -} - -func (x NullValue) String() string { - return proto.EnumName(NullValue_name, int32(x)) -} - -func (NullValue) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_df322afd6c9fb402, []int{0} -} - -func (NullValue) XXX_WellKnownType() string { return "NullValue" } - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -type Struct struct { - // Unordered map of dynamically typed values. 
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Struct) Reset() { *m = Struct{} } -func (m *Struct) String() string { return proto.CompactTextString(m) } -func (*Struct) ProtoMessage() {} -func (*Struct) Descriptor() ([]byte, []int) { - return fileDescriptor_df322afd6c9fb402, []int{0} -} - -func (*Struct) XXX_WellKnownType() string { return "Struct" } - -func (m *Struct) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Struct.Unmarshal(m, b) -} -func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Struct.Marshal(b, m, deterministic) -} -func (m *Struct) XXX_Merge(src proto.Message) { - xxx_messageInfo_Struct.Merge(m, src) -} -func (m *Struct) XXX_Size() int { - return xxx_messageInfo_Struct.Size(m) -} -func (m *Struct) XXX_DiscardUnknown() { - xxx_messageInfo_Struct.DiscardUnknown(m) -} - -var xxx_messageInfo_Struct proto.InternalMessageInfo - -func (m *Struct) GetFields() map[string]*Value { - if m != nil { - return m.Fields - } - return nil -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -type Value struct { - // The kind of value. - // - // Types that are valid to be assigned to Kind: - // *Value_NullValue - // *Value_NumberValue - // *Value_StringValue - // *Value_BoolValue - // *Value_StructValue - // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { - return fileDescriptor_df322afd6c9fb402, []int{1} -} - -func (*Value) XXX_WellKnownType() string { return "Value" } - -func (m *Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Value.Unmarshal(m, b) -} -func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Value.Marshal(b, m, deterministic) -} -func (m *Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Value.Merge(m, src) -} -func (m *Value) XXX_Size() int { - return xxx_messageInfo_Value.Size(m) -} -func (m *Value) XXX_DiscardUnknown() { - xxx_messageInfo_Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Value proto.InternalMessageInfo - -type isValue_Kind interface { - isValue_Kind() -} - -type Value_NullValue struct { - NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` -} - -type Value_NumberValue struct { - NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` -} - -type Value_StringValue struct { - StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` -} - -type Value_BoolValue struct { - BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` -} - -type Value_StructValue struct { - StructValue *Struct 
`protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` -} - -type Value_ListValue struct { - ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` -} - -func (*Value_NullValue) isValue_Kind() {} - -func (*Value_NumberValue) isValue_Kind() {} - -func (*Value_StringValue) isValue_Kind() {} - -func (*Value_BoolValue) isValue_Kind() {} - -func (*Value_StructValue) isValue_Kind() {} - -func (*Value_ListValue) isValue_Kind() {} - -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind - } - return nil -} - -func (m *Value) GetNullValue() NullValue { - if x, ok := m.GetKind().(*Value_NullValue); ok { - return x.NullValue - } - return NullValue_NULL_VALUE -} - -func (m *Value) GetNumberValue() float64 { - if x, ok := m.GetKind().(*Value_NumberValue); ok { - return x.NumberValue - } - return 0 -} - -func (m *Value) GetStringValue() string { - if x, ok := m.GetKind().(*Value_StringValue); ok { - return x.StringValue - } - return "" -} - -func (m *Value) GetBoolValue() bool { - if x, ok := m.GetKind().(*Value_BoolValue); ok { - return x.BoolValue - } - return false -} - -func (m *Value) GetStructValue() *Struct { - if x, ok := m.GetKind().(*Value_StructValue); ok { - return x.StructValue - } - return nil -} - -func (m *Value) GetListValue() *ListValue { - if x, ok := m.GetKind().(*Value_ListValue); ok { - return x.ListValue - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Value) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Value_NullValue)(nil), - (*Value_NumberValue)(nil), - (*Value_StringValue)(nil), - (*Value_BoolValue)(nil), - (*Value_StructValue)(nil), - (*Value_ListValue)(nil), - } -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -type ListValue struct { - // Repeated field of dynamically typed values. 
- Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListValue) Reset() { *m = ListValue{} } -func (m *ListValue) String() string { return proto.CompactTextString(m) } -func (*ListValue) ProtoMessage() {} -func (*ListValue) Descriptor() ([]byte, []int) { - return fileDescriptor_df322afd6c9fb402, []int{2} -} - -func (*ListValue) XXX_WellKnownType() string { return "ListValue" } - -func (m *ListValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListValue.Unmarshal(m, b) -} -func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) -} -func (m *ListValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListValue.Merge(m, src) -} -func (m *ListValue) XXX_Size() int { - return xxx_messageInfo_ListValue.Size(m) -} -func (m *ListValue) XXX_DiscardUnknown() { - xxx_messageInfo_ListValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ListValue proto.InternalMessageInfo - -func (m *ListValue) GetValues() []*Value { - if m != nil { - return m.Values - } - return nil -} - -func init() { - proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) - proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") - proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") - proto.RegisterType((*Value)(nil), "google.protobuf.Value") - proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") -} - -func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } - -var fileDescriptor_df322afd6c9fb402 = []byte{ - // 417 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, - 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, - 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, - 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, - 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, - 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, - 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, - 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, - 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, - 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, - 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, - 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, - 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, - 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, - 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, - 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, - 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 
0x6c, 0x78, 0xef, 0x92, - 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, - 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, - 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, - 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, - 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, - 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, - 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, - 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, - 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto deleted file mode 100644 index 7d7808e..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto +++ /dev/null @@ -1,96 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. 
The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -message Struct { - // Unordered map of dynamically typed values. - map fields = 1; -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. -// -// The JSON representation for `Value` is JSON value. -message Value { - // The kind of value. - oneof kind { - // Represents a null value. - NullValue null_value = 1; - // Represents a double value. - double number_value = 2; - // Represents a string value. - string string_value = 3; - // Represents a boolean value. - bool bool_value = 4; - // Represents a structured value. - Struct struct_value = 5; - // Represents a repeated `Value`. - ListValue list_value = 6; - } -} - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -enum NullValue { - // Null value. - NULL_VALUE = 0; -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -message ListValue { - // Repeated field of dynamically typed values. - repeated Value values = 1; -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8da0df0..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,132 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements operations on google.protobuf.Timestamp. 
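For reference, the `struct.proto` well-known types above amount to a string-keyed map of dynamically typed `Value`s, which is why a `Struct` marshals to a plain JSON object. A minimal usage sketch of the generated `structpb` package removed here (illustrative only, not taken from this tree; the `jsonpb` marshaling step and the field names are assumptions):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/jsonpb"
        structpb "github.com/golang/protobuf/ptypes/struct"
    )

    func main() {
        // Each field of a Struct wraps exactly one variant of the Value oneof.
        s := &structpb.Struct{
            Fields: map[string]*structpb.Value{
                "name":     {Kind: &structpb.Value_StringValue{StringValue: "example"}},
                "replicas": {Kind: &structpb.Value_NumberValue{NumberValue: 1}},
                "enabled":  {Kind: &structpb.Value_BoolValue{BoolValue: true}},
            },
        }

        // Per the JSON mapping described above, this prints one flat JSON object,
        // e.g. {"enabled":true,"name":"example","replicas":1}.
        out, err := (&jsonpb.Marshaler{}).MarshalToString(s)
        if err != nil {
            panic(err)
        }
        fmt.Println(out)
    }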
- -import ( - "errors" - "fmt" - "time" - - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - ts := &tspb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. 
-func TimestampString(ts *tspb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index 31cd846..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -package timestamp - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} -} - -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } - -var fileDescriptor_292007bbfe81227e = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index eafb3fa..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
-// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go deleted file mode 100644 index add19a1..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ /dev/null @@ -1,461 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/wrappers.proto - -package wrappers - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
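For orientation, the `ptypes` helpers and the `Timestamp` message removed above are the standard bridge between `time.Time` and `google.protobuf.Timestamp`. A minimal round-trip sketch against the pre-APIv2 import path this vendor tree used (illustrative only; the example time value is arbitrary):

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // time.Time -> Timestamp; fails only outside [0001-01-01, 10000-01-01).
        ts, err := ptypes.TimestampProto(time.Date(2022, 1, 24, 14, 42, 29, 0, time.UTC))
        if err != nil {
            panic(err)
        }

        // Timestamp -> time.Time; the result is always in UTC and is validated
        // the same way (seconds range, nanos in [0, 1e9)).
        t, err := ptypes.Timestamp(ts)
        if err != nil {
            panic(err)
        }

        fmt.Println(ptypes.TimestampString(ts)) // RFC 3339, e.g. "2022-01-24T14:42:29Z"
        fmt.Println(t.UTC())                    // 2022-01-24 14:42:29 +0000 UTC
    }

Note that, as the comments above state, `Timestamp` deliberately returns a best-effort `time.Time` even on error rather than the zero value.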
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Wrapper message for `double`. -// -// The JSON representation for `DoubleValue` is JSON number. -type DoubleValue struct { - // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleValue) Reset() { *m = DoubleValue{} } -func (m *DoubleValue) String() string { return proto.CompactTextString(m) } -func (*DoubleValue) ProtoMessage() {} -func (*DoubleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{0} -} - -func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } - -func (m *DoubleValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleValue.Unmarshal(m, b) -} -func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) -} -func (m *DoubleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleValue.Merge(m, src) -} -func (m *DoubleValue) XXX_Size() int { - return xxx_messageInfo_DoubleValue.Size(m) -} -func (m *DoubleValue) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleValue.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleValue proto.InternalMessageInfo - -func (m *DoubleValue) GetValue() float64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -type FloatValue struct { - // The float value. - Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FloatValue) Reset() { *m = FloatValue{} } -func (m *FloatValue) String() string { return proto.CompactTextString(m) } -func (*FloatValue) ProtoMessage() {} -func (*FloatValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{1} -} - -func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } - -func (m *FloatValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FloatValue.Unmarshal(m, b) -} -func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) -} -func (m *FloatValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_FloatValue.Merge(m, src) -} -func (m *FloatValue) XXX_Size() int { - return xxx_messageInfo_FloatValue.Size(m) -} -func (m *FloatValue) XXX_DiscardUnknown() { - xxx_messageInfo_FloatValue.DiscardUnknown(m) -} - -var xxx_messageInfo_FloatValue proto.InternalMessageInfo - -func (m *FloatValue) GetValue() float32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -type Int64Value struct { - // The int64 value. 
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Int64Value) Reset() { *m = Int64Value{} } -func (m *Int64Value) String() string { return proto.CompactTextString(m) } -func (*Int64Value) ProtoMessage() {} -func (*Int64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{2} -} - -func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } - -func (m *Int64Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Int64Value.Unmarshal(m, b) -} -func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) -} -func (m *Int64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int64Value.Merge(m, src) -} -func (m *Int64Value) XXX_Size() int { - return xxx_messageInfo_Int64Value.Size(m) -} -func (m *Int64Value) XXX_DiscardUnknown() { - xxx_messageInfo_Int64Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Int64Value proto.InternalMessageInfo - -func (m *Int64Value) GetValue() int64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `uint64`. -// -// The JSON representation for `UInt64Value` is JSON string. -type UInt64Value struct { - // The uint64 value. - Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UInt64Value) Reset() { *m = UInt64Value{} } -func (m *UInt64Value) String() string { return proto.CompactTextString(m) } -func (*UInt64Value) ProtoMessage() {} -func (*UInt64Value) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{3} -} - -func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } - -func (m *UInt64Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UInt64Value.Unmarshal(m, b) -} -func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) -} -func (m *UInt64Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt64Value.Merge(m, src) -} -func (m *UInt64Value) XXX_Size() int { - return xxx_messageInfo_UInt64Value.Size(m) -} -func (m *UInt64Value) XXX_DiscardUnknown() { - xxx_messageInfo_UInt64Value.DiscardUnknown(m) -} - -var xxx_messageInfo_UInt64Value proto.InternalMessageInfo - -func (m *UInt64Value) GetValue() uint64 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `int32`. -// -// The JSON representation for `Int32Value` is JSON number. -type Int32Value struct { - // The int32 value. 
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Int32Value) Reset() { *m = Int32Value{} } -func (m *Int32Value) String() string { return proto.CompactTextString(m) } -func (*Int32Value) ProtoMessage() {} -func (*Int32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{4} -} - -func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } - -func (m *Int32Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Int32Value.Unmarshal(m, b) -} -func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) -} -func (m *Int32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_Int32Value.Merge(m, src) -} -func (m *Int32Value) XXX_Size() int { - return xxx_messageInfo_Int32Value.Size(m) -} -func (m *Int32Value) XXX_DiscardUnknown() { - xxx_messageInfo_Int32Value.DiscardUnknown(m) -} - -var xxx_messageInfo_Int32Value proto.InternalMessageInfo - -func (m *Int32Value) GetValue() int32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -type UInt32Value struct { - // The uint32 value. - Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UInt32Value) Reset() { *m = UInt32Value{} } -func (m *UInt32Value) String() string { return proto.CompactTextString(m) } -func (*UInt32Value) ProtoMessage() {} -func (*UInt32Value) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{5} -} - -func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } - -func (m *UInt32Value) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UInt32Value.Unmarshal(m, b) -} -func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) -} -func (m *UInt32Value) XXX_Merge(src proto.Message) { - xxx_messageInfo_UInt32Value.Merge(m, src) -} -func (m *UInt32Value) XXX_Size() int { - return xxx_messageInfo_UInt32Value.Size(m) -} -func (m *UInt32Value) XXX_DiscardUnknown() { - xxx_messageInfo_UInt32Value.DiscardUnknown(m) -} - -var xxx_messageInfo_UInt32Value proto.InternalMessageInfo - -func (m *UInt32Value) GetValue() uint32 { - if m != nil { - return m.Value - } - return 0 -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -type BoolValue struct { - // The bool value. 
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BoolValue) Reset() { *m = BoolValue{} } -func (m *BoolValue) String() string { return proto.CompactTextString(m) } -func (*BoolValue) ProtoMessage() {} -func (*BoolValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{6} -} - -func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } - -func (m *BoolValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BoolValue.Unmarshal(m, b) -} -func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) -} -func (m *BoolValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolValue.Merge(m, src) -} -func (m *BoolValue) XXX_Size() int { - return xxx_messageInfo_BoolValue.Size(m) -} -func (m *BoolValue) XXX_DiscardUnknown() { - xxx_messageInfo_BoolValue.DiscardUnknown(m) -} - -var xxx_messageInfo_BoolValue proto.InternalMessageInfo - -func (m *BoolValue) GetValue() bool { - if m != nil { - return m.Value - } - return false -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -type StringValue struct { - // The string value. - Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringValue) Reset() { *m = StringValue{} } -func (m *StringValue) String() string { return proto.CompactTextString(m) } -func (*StringValue) ProtoMessage() {} -func (*StringValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{7} -} - -func (*StringValue) XXX_WellKnownType() string { return "StringValue" } - -func (m *StringValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringValue.Unmarshal(m, b) -} -func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) -} -func (m *StringValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringValue.Merge(m, src) -} -func (m *StringValue) XXX_Size() int { - return xxx_messageInfo_StringValue.Size(m) -} -func (m *StringValue) XXX_DiscardUnknown() { - xxx_messageInfo_StringValue.DiscardUnknown(m) -} - -var xxx_messageInfo_StringValue proto.InternalMessageInfo - -func (m *StringValue) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Wrapper message for `bytes`. -// -// The JSON representation for `BytesValue` is JSON string. -type BytesValue struct { - // The bytes value. 
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BytesValue) Reset() { *m = BytesValue{} } -func (m *BytesValue) String() string { return proto.CompactTextString(m) } -func (*BytesValue) ProtoMessage() {} -func (*BytesValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5377b62bda767935, []int{8} -} - -func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } - -func (m *BytesValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BytesValue.Unmarshal(m, b) -} -func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) -} -func (m *BytesValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesValue.Merge(m, src) -} -func (m *BytesValue) XXX_Size() int { - return xxx_messageInfo_BytesValue.Size(m) -} -func (m *BytesValue) XXX_DiscardUnknown() { - xxx_messageInfo_BytesValue.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesValue proto.InternalMessageInfo - -func (m *BytesValue) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") - proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") - proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") - proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") - proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") - proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") - proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") - proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") - proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") -} - -func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } - -var fileDescriptor_5377b62bda767935 = []byte{ - // 259 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, - 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, - 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, - 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, - 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, - 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, - 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, - 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, - 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, - 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, - 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, - 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, - 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, - 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 
0x5e, 0x08, 0x48, 0x4b, - 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, - 0x01, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto deleted file mode 100644 index 0194763..0000000 --- a/cmd/bpa-operator/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto +++ /dev/null @@ -1,118 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Wrappers for primitive (non-message) types. These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/wrappers"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "WrappersProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// Wrapper message for `double`. -// -// The JSON representation for `DoubleValue` is JSON number. -message DoubleValue { - // The double value. - double value = 1; -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -message FloatValue { - // The float value. - float value = 1; -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -message Int64Value { - // The int64 value. - int64 value = 1; -} - -// Wrapper message for `uint64`. -// -// The JSON representation for `UInt64Value` is JSON string. -message UInt64Value { - // The uint64 value. - uint64 value = 1; -} - -// Wrapper message for `int32`. 
-// -// The JSON representation for `Int32Value` is JSON number. -message Int32Value { - // The int32 value. - int32 value = 1; -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -message UInt32Value { - // The uint32 value. - uint32 value = 1; -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -message BoolValue { - // The bool value. - bool value = 1; -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -message StringValue { - // The string value. - string value = 1; -} - -// Wrapper message for `bytes`. -// -// The JSON representation for `BytesValue` is JSON string. -message BytesValue { - // The bytes value. - bytes value = 1; -} diff --git a/cmd/bpa-operator/vendor/github.com/google/btree/.travis.yml b/cmd/bpa-operator/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/cmd/bpa-operator/vendor/github.com/google/btree/LICENSE b/cmd/bpa-operator/vendor/github.com/google/btree/LICENSE deleted file mode 100644 index d645695..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/btree/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/cmd/bpa-operator/vendor/github.com/google/btree/README.md b/cmd/bpa-operator/vendor/github.com/google/btree/README.md deleted file mode 100644 index 6062a4d..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/btree/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# BTree implementation for Go - -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - -This package provides an in-memory B-Tree implementation for Go, useful as -an ordered, mutable data structure. - -The API is based off of the wonderful -http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to -act as a drop-in replacement for gollrb trees. - -See http://godoc.org/github.com/google/btree for documentation. 
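Before the full `btree.go` listing below, a short usage sketch of this vendored library's public API (`New`, `ReplaceOrInsert`, `Get`, `Has`, `Len`, `Ascend` and the bundled `btree.Int` key type); this is illustrative only and not taken from this repository:

    package main

    import (
        "fmt"

        "github.com/google/btree"
    )

    func main() {
        // Degree 2 gives a 2-3-4 tree: each node holds 1-3 items and 2-4 children.
        tr := btree.New(2)

        // btree.Int is a ready-made Item whose Less is ordinary integer ordering.
        for i := 0; i < 10; i++ {
            tr.ReplaceOrInsert(btree.Int(i))
        }

        fmt.Println(tr.Len())              // 10
        fmt.Println(tr.Get(btree.Int(3)))  // 3
        fmt.Println(tr.Has(btree.Int(42))) // false

        // Ascend visits items in sorted order until the callback returns false.
        tr.Ascend(func(i btree.Item) bool {
            fmt.Print(i, " ")
            return true
        })
        fmt.Println()
    }

Custom key types only need to implement `Less(than btree.Item) bool` with a strict weak ordering, as the `Item` interface defined just below spells out.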
diff --git a/cmd/bpa-operator/vendor/github.com/google/btree/btree.go b/cmd/bpa-operator/vendor/github.com/google/btree/btree.go deleted file mode 100644 index 6ff062f..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/btree/btree.go +++ /dev/null @@ -1,890 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. -// It is not meant for persistent storage solutions. -// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. -// -// This implementation is designed to be a drop-in replacement to gollrb.LLRB -// trees, (http://github.com/petar/gollrb), an excellent and probably the most -// widely used ordered tree implementation in the Go ecosystem currently. -// Its functions, therefore, exactly mirror those of -// llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. -package btree - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" -) - -// Item represents a single object in the tree. -type Item interface { - // Less tests whether the current item is less than the given argument. - // - // This must provide a strict weak ordering. - // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only - // hold one of either a or b in the tree). - Less(than Item) bool -} - -const ( - DefaultFreeListSize = 32 -) - -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - -// FreeList represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList. 
-// Two Btrees using the same freelist are safe for concurrent write access. -type FreeList struct { - mu sync.Mutex - freelist []*node -} - -// NewFreeList creates a new free list. -// size is the maximum size of the returned free list. -func NewFreeList(size int) *FreeList { - return &FreeList{freelist: make([]*node, 0, size)} -} - -func (f *FreeList) newNode() (n *node) { - f.mu.Lock() - index := len(f.freelist) - 1 - if index < 0 { - f.mu.Unlock() - return new(node) - } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() - return -} - -// freeNode adds the given node to the list, returning true if it was added -// and false if it was discarded. -func (f *FreeList) freeNode(n *node) (out bool) { - f.mu.Lock() - if len(f.freelist) < cap(f.freelist) { - f.freelist = append(f.freelist, n) - out = true - } - f.mu.Unlock() - return -} - -// ItemIterator allows callers of Ascend* to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. -type ItemIterator func(i Item) bool - -// New creates a new B-Tree with the given degree. -// -// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -func New(degree int) *BTree { - return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) -} - -// NewWithFreeList creates a new B-Tree that uses the given node free list. -func NewWithFreeList(degree int, f *FreeList) *BTree { - if degree <= 1 { - panic("bad degree") - } - return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, - } -} - -// items stores items in a node. -type items []Item - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *items) insertAt(index int, item Item) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = item -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *items) removeAt(index int) Item { - item := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return item -} - -// pop removes and returns the last element in the list. -func (s *items) pop() (out Item) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - -// find returns the index where the given item should be inserted into this -// list. 'found' is true if the item already exists in the list at the given -// index. -func (s items) find(item Item) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return item.Less(s[i]) - }) - if i > 0 && !s[i-1].Less(item) { - return i - 1, true - } - return i, false -} - -// children stores child nodes in a node. -type children []*node - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *children) insertAt(index int, n *node) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = n -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. 
-func (s *children) removeAt(index int) *node { - n := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return n -} - -// pop removes and returns the last element in the list. -func (s *children) pop() (out *node) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - -// node is an internal node in a tree. -// -// It must at all times maintain the invariant that either -// * len(children) == 0, len(items) unconstrained -// * len(children) == len(items) + 1 -type node struct { - items items - children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c -} - -// split splits the given node at the given index. The current node shrinks, -// and this function returns the item that existed at that index and a new node -// containing all items/children after it. -func (n *node) split(i int) (Item, *node) { - item := n.items[i] - next := n.cow.newNode() - next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) - if len(n.children) > 0 { - next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) - } - return item, next -} - -// maybeSplitChild checks if a child should be split, and if so splits it. -// Returns whether or not a split occurred. -func (n *node) maybeSplitChild(i, maxItems int) bool { - if len(n.children[i].items) < maxItems { - return false - } - first := n.mutableChild(i) - item, second := first.split(maxItems / 2) - n.items.insertAt(i, item) - n.children.insertAt(i+1, second) - return true -} - -// insert inserts an item into the subtree rooted at this node, making sure -// no nodes in the subtree exceed maxItems items. Should an equivalent item be -// be found/replaced by insert, it will be returned. -func (n *node) insert(item Item, maxItems int) Item { - i, found := n.items.find(item) - if found { - out := n.items[i] - n.items[i] = item - return out - } - if len(n.children) == 0 { - n.items.insertAt(i, item) - return nil - } - if n.maybeSplitChild(i, maxItems) { - inTree := n.items[i] - switch { - case item.Less(inTree): - // no change, we want first split node - case inTree.Less(item): - i++ // we want second split node - default: - out := n.items[i] - n.items[i] = item - return out - } - } - return n.mutableChild(i).insert(item, maxItems) -} - -// get finds the given key in the subtree and returns it. 
-func (n *node) get(key Item) Item { - i, found := n.items.find(key) - if found { - return n.items[i] - } else if len(n.children) > 0 { - return n.children[i].get(key) - } - return nil -} - -// min returns the first item in the subtree. -func min(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[0] - } - if len(n.items) == 0 { - return nil - } - return n.items[0] -} - -// max returns the last item in the subtree. -func max(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[len(n.children)-1] - } - if len(n.items) == 0 { - return nil - } - return n.items[len(n.items)-1] -} - -// toRemove details what item to remove in a node.remove call. -type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node) remove(item Item, minItems int, typ toRemove) Item { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop() - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0) - } - i = 0 - case removeItem: - i, found = n.items.find(item) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i) - } - return nil - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - n.items[i] = child.remove(nil, minItems, removeMax) - return out - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. - return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. -// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. 
-func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. -func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// Used for testing/debugging purposes. 
-func (n *node) print(w io.Writer, level int) { - fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) - for _, c := range n.children { - c.print(w, level+1) - } -} - -// BTree is an implementation of a B-Tree. -// -// BTree stores Item instances in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext struct { - freelist *FreeList -} - -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - -// maxItems returns the max number of items to allow per node. -func (t *BTree) maxItems() int { - return t.degree*2 - 1 -} - -// minItems returns the min number of items to allow per node (ignored for the -// root node). -func (t *BTree) minItems() int { - return t.degree - 1 -} - -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c - return -} - -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). 
-func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - if item == nil { - panic("nil item being added to BTree") - } - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out := t.root.insert(item, t.maxItems()) - if out == nil { - t.length++ - } - return out -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - return t.deleteItem(nil, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMax() Item { - return t.deleteItem(nil, removeMax) -} - -func (t *BTree) deleteItem(item Item, typ toRemove) Item { - if t.root == nil || len(t.root.items) == 0 { - return nil - } - t.root = t.root.mutableFor(t.cow) - out := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if out != nil { - t.length-- - } - return out -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, pivot, false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, pivot, nil, true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. 
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - if t.root == nil { - return nil - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - return min(t.root) -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - return max(t.root) -} - -// Has returns true if the given key is in the tree. -func (t *BTree) Has(key Item) bool { - return t.Get(key) != nil -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. 
-func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} diff --git a/cmd/bpa-operator/vendor/github.com/google/btree/btree_mem.go b/cmd/bpa-operator/vendor/github.com/google/btree/btree_mem.go deleted file mode 100644 index cb95b7f..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/btree/btree_mem.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build ignore - -// This binary compares memory usage between btree and gollrb. -package main - -import ( - "flag" - "fmt" - "math/rand" - "runtime" - "time" - - "github.com/google/btree" - "github.com/petar/GoLLRB/llrb" -) - -var ( - size = flag.Int("size", 1000000, "size of the tree to build") - degree = flag.Int("degree", 8, "degree of btree") - gollrb = flag.Bool("llrb", false, "use llrb instead of btree") -) - -func main() { - flag.Parse() - vals := rand.Perm(*size) - var t, v interface{} - v = vals - var stats runtime.MemStats - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- BEFORE ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - start := time.Now() - if *gollrb { - tr := llrb.New() - for _, v := range vals { - tr.ReplaceOrInsert(llrb.Int(v)) - } - t = tr // keep it around - } else { - tr := btree.New(*degree) - for _, v := range vals { - tr.ReplaceOrInsert(btree.Int(v)) - } - t = tr // keep it around - } - fmt.Printf("%v inserts in %v\n", *size, time.Since(start)) - fmt.Println("-------- AFTER ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - for i := 0; i < 10; i++ { - runtime.GC() - } - fmt.Println("-------- AFTER GC ----------") - runtime.ReadMemStats(&stats) - fmt.Printf("%+v\n", stats) - if t == v { - fmt.Println("to make sure vals and tree aren't GC'd") - } -} diff --git a/cmd/bpa-operator/vendor/github.com/google/gofuzz/.travis.yml b/cmd/bpa-operator/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index f8684d9..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - -script: - - go test -cover diff --git a/cmd/bpa-operator/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/cmd/bpa-operator/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 51cf5cd..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. 
- - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. - -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/cmd/bpa-operator/vendor/github.com/google/gofuzz/LICENSE b/cmd/bpa-operator/vendor/github.com/google/gofuzz/LICENSE deleted file mode 100644 index d645695..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/gofuzz/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/cmd/bpa-operator/vendor/github.com/google/gofuzz/README.md b/cmd/bpa-operator/vendor/github.com/google/gofuzz/README.md deleted file mode 100644 index 64869af..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/gofuzz/README.md +++ /dev/null @@ -1,71 +0,0 @@ -gofuzz -====== - -gofuzz is a library for populating go objects with random values. - -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) -[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) - -This is useful for testing: - -* Do your project's objects really serialize/unserialize correctly in all cases? -* Is there an incorrectly formatted object that will cause your project to panic? - -Import with ```import "github.com/google/gofuzz"``` - -You can use it on single variables: -```go -f := fuzz.New() -var myInt int -f.Fuzz(&myInt) // myInt gets a random value. -``` - -You can use it on maps: -```go -f := fuzz.New().NilChance(0).NumElements(1, 1) -var myMap map[ComplexKeyType]string -f.Fuzz(&myMap) // myMap will have exactly one element. -``` - -Customize the chance of getting a nil pointer: -```go -f := fuzz.New().NilChance(.5) -var fancyStruct struct { - A, B, C, D *string -} -f.Fuzz(&fancyStruct) // About half the pointers should be set. -``` - -You can even customize the randomization completely if needed: -```go -type MyEnum string -const ( - A MyEnum = "A" - B MyEnum = "B" -) -type MyInfo struct { - Type MyEnum - AInfo *string - BInfo *string -} - -f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { - switch c.Intn(2) { - case 0: - e.Type = A - c.Fuzz(&e.AInfo) - case 1: - e.Type = B - c.Fuzz(&e.BInfo) - } - }, -) - -var myObject MyInfo -f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. -``` - -See more examples in ```example_test.go```. - -Happy testing! diff --git a/cmd/bpa-operator/vendor/github.com/google/gofuzz/doc.go b/cmd/bpa-operator/vendor/github.com/google/gofuzz/doc.go deleted file mode 100644 index 9f9956d..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/gofuzz/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fuzz is a library for populating go objects with random values. -package fuzz diff --git a/cmd/bpa-operator/vendor/github.com/google/gofuzz/fuzz.go b/cmd/bpa-operator/vendor/github.com/google/gofuzz/fuzz.go deleted file mode 100644 index 1dfa80a..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/gofuzz/fuzz.go +++ /dev/null @@ -1,487 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "fmt" - "math/rand" - "reflect" - "time" -) - -// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. -type fuzzFuncMap map[reflect.Type]reflect.Value - -// Fuzzer knows how to fill any object with random fields. -type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int -} - -// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, -// RandSource, NilChance, or NumElements in any order. -func New() *Fuzzer { - return NewWithSeed(time.Now().UnixNano()) -} - -func NewWithSeed(seed int64) *Fuzzer { - f := &Fuzzer{ - defaultFuzzFuncs: fuzzFuncMap{ - reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), - }, - - fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(seed)), - nilChance: .2, - minElements: 1, - maxElements: 10, - maxDepth: 100, - } - return f -} - -// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. -// -// Each entry in fuzzFuncs must be a function taking two parameters. -// The first parameter must be a pointer or map. It is the variable that -// function will fill with random data. The second parameter must be a -// fuzz.Continue, which will provide a source of randomness and a way -// to automatically continue fuzzing smaller pieces of the first parameter. -// -// These functions are called sensibly, e.g., if you wanted custom string -// fuzzing, the function `func(s *string, c fuzz.Continue)` would get -// called and passed the address of strings. Maps and pointers will always -// be made/new'd for you, ignoring the NilChange option. For slices, it -// doesn't make much sense to pre-create them--Fuzzer doesn't know how -// long you want your slice--so take a pointer to a slice, and make it -// yourself. (If you don't want your map/pointer type pre-made, take a -// pointer to it, and make it yourself.) See the examples for a range of -// custom functions. -func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 0 { - panic("Need 2 in and 0 out params!") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type fuzz.Continue") - } - f.fuzzFuncs[argT] = v - } - return f -} - -// RandSource causes f to get values from the given source of randomness. -// Use if you want deterministic fuzzing. -func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { - f.r = rand.New(s) - return f -} - -// NilChance sets the probability of creating a nil pointer, map, or slice to -// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. 
-func (f *Fuzzer) NilChance(p float64) *Fuzzer { - if p < 0 || p > 1 { - panic("p should be between 0 and 1, inclusive.") - } - f.nilChance = p - return f -} - -// NumElements sets the minimum and maximum number of elements that will be -// added to a non-nil map or slice. -func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { - if atLeast > atMost { - panic("atLeast must be <= atMost") - } - if atLeast < 0 { - panic("atLeast must be >= 0") - } - f.minElements = atLeast - f.maxElements = atMost - return f -} - -func (f *Fuzzer) genElementCount() int { - if f.minElements == f.maxElements { - return f.minElements - } - return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) -} - -func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance -} - -// MaxDepth sets the maximum number of recursive fuzz calls that will be made -// before stopping. This includes struct members, pointers, and map and slice -// elements. -func (f *Fuzzer) MaxDepth(d int) *Fuzzer { - f.maxDepth = d - return f -} - -// Fuzz recursively fills all of obj's fields with something random. First -// this tries to find a custom fuzz function (see Funcs). If there is no -// custom function this tests whether the object implements fuzz.Interface and, -// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if -// there is a default fuzz function provided by this package. If all of that -// fails, this will generate random values for all primitive fields and then -// recurse for all non-primitives. -// -// This is safe for cyclic or tree-like structs, up to a limit. Use the -// MaxDepth method to adjust how deep you need it to recurse. -// -// obj must be a pointer. Only exported (public) fields can be set (thanks, -// golang :/ ) Intended for tests, so will panic on bad input or unimplemented -// fields. -func (f *Fuzzer) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, 0) -} - -// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -// Not safe for cyclic or tree-like structs! -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. -func (f *Fuzzer) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, flagNoCustomFuzz) -} - -const ( - // Do not try to find a custom fuzz function. Does not apply recursively. - flagNoCustomFuzz uint64 = 1 << iota -) - -func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { - fc := &fuzzerContext{fuzzer: f} - fc.doFuzz(v, flags) -} - -// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer -// be thread-safe. -type fuzzerContext struct { - fuzzer *Fuzzer - curDepth int -} - -func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { - if fc.curDepth >= fc.fuzzer.maxDepth { - return - } - fc.curDepth++ - defer func() { fc.curDepth-- }() - - if !v.CanSet() { - return - } - - if flags&flagNoCustomFuzz == 0 { - // Check for both pointer and non-pointer custom functions. 
- if v.CanAddr() && fc.tryCustom(v.Addr()) { - return - } - if fc.tryCustom(v) { - return - } - } - - if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, fc.fuzzer.r) - return - } - switch v.Kind() { - case reflect.Map: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.MakeMap(v.Type())) - n := fc.fuzzer.genElementCount() - for i := 0; i < n; i++ { - key := reflect.New(v.Type().Key()).Elem() - fc.doFuzz(key, 0) - val := reflect.New(v.Type().Elem()).Elem() - fc.doFuzz(val, 0) - v.SetMapIndex(key, val) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Ptr: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.New(v.Type().Elem())) - fc.doFuzz(v.Elem(), 0) - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Slice: - if fc.fuzzer.genShouldFill() { - n := fc.fuzzer.genElementCount() - v.Set(reflect.MakeSlice(v.Type(), n, n)) - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Array: - if fc.fuzzer.genShouldFill() { - n := v.Len() - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - fc.doFuzz(v.Field(i), 0) - } - case reflect.Chan: - fallthrough - case reflect.Func: - fallthrough - case reflect.Interface: - fallthrough - default: - panic(fmt.Sprintf("Can't handle %#v", v.Interface())) - } -} - -// tryCustom searches for custom handlers, and returns true iff it finds a match -// and successfully randomizes v. -func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { - // First: see if we have a fuzz function for it. - doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] - if !ok { - // Second: see if it can fuzz itself. - if v.CanInterface() { - intf := v.Interface() - if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) - return true - } - } - // Finally: see if there is a default fuzz function. - doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] - if !ok { - return false - } - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return false - } - - doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - fc: fc, - Rand: fc.fuzzer.r, - })}) - return true -} - -// Interface represents an object that knows how to fuzz itself. Any time we -// find a type that implements this interface we will delegate the act of -// fuzzing itself. -type Interface interface { - Fuzz(c Continue) -} - -// Continue can be passed to custom fuzzing functions to allow them to use -// the correct source of randomness and to continue fuzzing their members. -type Continue struct { - fc *fuzzerContext - - // For convenience, Continue implements rand.Rand via embedding. - // Use this for generating any randomness if you want your fuzzing - // to be repeatable for a given seed. - *rand.Rand -} - -// Fuzz continues fuzzing obj. obj must be a pointer. -func (c Continue) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, 0) -} - -// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. 
-func (c Continue) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, flagNoCustomFuzz) -} - -// RandString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func (c Continue) RandString() string { - return randString(c.Rand) -} - -// RandUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func (c Continue) RandUint64() uint64 { - return randUint64(c.Rand) -} - -// RandBool returns true or false randomly. -func (c Continue) RandBool() bool { - return randBool(c.Rand) -} - -func fuzzInt(v reflect.Value, r *rand.Rand) { - v.SetInt(int64(randUint64(r))) -} - -func fuzzUint(v reflect.Value, r *rand.Rand) { - v.SetUint(randUint64(r)) -} - -func fuzzTime(t *time.Time, c Continue) { - var sec, nsec int64 - // Allow for about 1000 years of random time values, which keeps things - // like JSON parsing reasonably happy. - sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) - c.Fuzz(&nsec) - *t = time.Unix(sec, nsec) -} - -var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ - reflect.Bool: func(v reflect.Value, r *rand.Rand) { - v.SetBool(randBool(r)) - }, - reflect.Int: fuzzInt, - reflect.Int8: fuzzInt, - reflect.Int16: fuzzInt, - reflect.Int32: fuzzInt, - reflect.Int64: fuzzInt, - reflect.Uint: fuzzUint, - reflect.Uint8: fuzzUint, - reflect.Uint16: fuzzUint, - reflect.Uint32: fuzzUint, - reflect.Uint64: fuzzUint, - reflect.Uintptr: fuzzUint, - reflect.Float32: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(float64(r.Float32())) - }, - reflect.Float64: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(r.Float64()) - }, - reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, - reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, - reflect.String: func(v reflect.Value, r *rand.Rand) { - v.SetString(randString(r)) - }, - reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, -} - -// randBool returns true or false randomly. -func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false -} - -type charRange struct { - first, last rune -} - -// choose returns a random unicode character from the given range, using the -// given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) -} - -var unicodeRanges = []charRange{ - {' ', '~'}, // ASCII characters - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -// randString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) -} - -// randUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. 
-func randUint64(r *rand.Rand) uint64 { - return uint64(r.Uint32())<<32 | uint64(r.Uint32()) -} diff --git a/cmd/bpa-operator/vendor/github.com/google/gofuzz/go.mod b/cmd/bpa-operator/vendor/github.com/google/gofuzz/go.mod deleted file mode 100644 index 8ec4fe9..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/gofuzz/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/google/gofuzz - -go 1.12 diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/.travis.yml b/cmd/bpa-operator/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a6..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/CONTRIBUTING.md b/cmd/bpa-operator/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/CONTRIBUTORS b/cmd/bpa-operator/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/LICENSE b/cmd/bpa-operator/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc6826..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
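For reference, a minimal sketch of how the custom-function machinery deleted above (tryCustom, Continue, fillFuncMap) is normally driven. It assumes the fuzz.New/NilChance/Funcs entry points from the same gofuzz package, which are not shown in this hunk, and the endpoint struct is purely hypothetical:

    package main

    import (
        "fmt"

        fuzz "github.com/google/gofuzz"
    )

    // endpoint is a hypothetical type used only for this illustration.
    type endpoint struct {
        Host string
        Port int
    }

    func main() {
        // Funcs registers a custom handler keyed by *endpoint; doFuzz/tryCustom
        // above look it up by type and call it with a Continue so it can keep
        // fuzzing nested members with the same randomness source.
        f := fuzz.New().NilChance(0).Funcs(
            func(e *endpoint, c fuzz.Continue) {
                c.Fuzz(&e.Host)                        // delegate back to the default string fuzzing
                e.Port = int(c.RandUint64()%65535) + 1 // constrain to a valid port
            },
        )

        var e endpoint
        f.Fuzz(&e) // Fuzz requires a pointer, mirroring Continue.Fuzz above
        fmt.Printf("%+v\n", e)
    }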
diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/README.md b/cmd/bpa-operator/vendor/github.com/google/uuid/README.md deleted file mode 100644 index 9d92c11..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). - -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://godoc.org/github.com/google/uuid diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/dce.go b/cmd/bpa-operator/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. 
-func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/doc.go b/cmd/bpa-operator/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/go.mod b/cmd/bpa-operator/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd7..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/hash.go b/cmd/bpa-operator/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b174616..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) - h.Write(data) - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/marshal.go b/cmd/bpa-operator/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 7f9e0c6..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google Inc. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err == nil { - *uuid = id - } - return err -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/node.go b/cmd/bpa-operator/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
-func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/node_js.go b/cmd/bpa-operator/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78ed..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/node_net.go b/cmd/bpa-operator/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcdd..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/sql.go b/cmd/bpa-operator/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index f326b54..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. 
-func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/time.go b/cmd/bpa-operator/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06c..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. 
(section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/util.go b/cmd/bpa-operator/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c73..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. 
-func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/uuid.go b/cmd/bpa-operator/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 524404c..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. -func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
-func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. 
-// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/version1.go b/cmd/bpa-operator/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 199a1ac..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - copy(uuid[10:], nodeID[:]) - - return uuid, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/google/uuid/version4.go b/cmd/bpa-operator/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index 84af91c..0000000 --- a/cmd/bpa-operator/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. 
-func NewRandom() (UUID, error) { - var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/LICENSE b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/LICENSE deleted file mode 100644 index 6b0b127..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
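For reference, a minimal sketch of the public surface the deleted uuid files provided; every call below (New, NewUUID, NewSHA1, NameSpaceDNS, MustParse, String, URN, Version, Variant, Time, ClockSequence) appears in the removed sources above:

    package main

    import (
        "fmt"

        "github.com/google/uuid"
    )

    func main() {
        // Version 4: random bits from crypto/rand, with the version and variant
        // bits forced exactly as NewRandom does above.
        v4 := uuid.New()

        // Version 1: timestamp + clock sequence + node ID (see NewUUID, GetTime,
        // and the node.go helpers in the removed files).
        v1, err := uuid.NewUUID()
        if err != nil {
            panic(err)
        }

        // Version 5: SHA-1 of a namespace UUID concatenated with the name.
        v5 := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.org"))

        // Parsing accepts the plain, urn:uuid:, braced, and raw-hex forms.
        parsed := uuid.MustParse(v4.String())

        fmt.Println(v4, v4.Version(), v4.Variant())
        fmt.Println(v1.Time(), v1.ClockSequence())
        fmt.Println(v5.URN())
        fmt.Println(parsed == v4) // UUID is a 16-byte array, so it compares directly
    }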
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go deleted file mode 100644 index 5351f36..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ /dev/null @@ -1,8728 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -package openapi_v2 - -import ( - "fmt" - "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v2" - "regexp" - "strings" -) - -// Version returns the package name (and OpenAPI version). -func Version() string { - return "openapi_v2" -} - -// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. -func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { - errors := make([]error, 0) - x := &AdditionalPropertiesItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // bool boolean = 2; - boolValue, ok := in.(bool) - if ok { - x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewAny creates an object of type Any if possible, returning an error if not. 
-func NewAny(in interface{}, context *compiler.Context) (*Any, error) { - errors := make([]error, 0) - x := &Any{} - bytes, _ := yaml.Marshal(in) - x.Yaml = string(bytes) - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. -func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { - errors := make([]error, 0) - x := &ApiKeySecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [apiKey] - if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header query] - if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value 
= resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. -func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { - errors := make([]error, 0) - x := &BasicAuthenticationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [basic] - if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
-func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { - errors := make([]error, 0) - x := &BodyParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "schema"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "required", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [body] - if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 4; - v4 := compiler.MapValueForKey(m, "required") - if v4 != nil { - x.Required, ok = v4.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema schema = 5; - v5 := compiler.MapValueForKey(m, "schema") - if v5 != nil { - var err error - x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewContact creates an object of type Contact if 
possible, returning an error if not. -func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { - errors := make([]error, 0) - x := &Contact{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"email", "name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string email = 3; - v3 := compiler.MapValueForKey(m, "email") - if v3 != nil { - x.Email, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefault creates an object of type Default if possible, returning an error if not. 
-func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { - errors := make([]error, 0) - x := &Default{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { - errors := make([]error, 0) - x := &Definitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDocument creates an object of type Document if possible, returning an error if not. 
-func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { - errors := make([]error, 0) - x := &Document{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"info", "paths", "swagger"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string swagger = 1; - v1 := compiler.MapValueForKey(m, "swagger") - if v1 != nil { - x.Swagger, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [2.0] - if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Info info = 2; - v2 := compiler.MapValueForKey(m, "info") - if v2 != nil { - var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) - if err != nil { - errors = append(errors, err) - } - } - // string host = 3; - v3 := compiler.MapValueForKey(m, "host") - if v3 != nil { - x.Host, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string base_path = 4; - v4 := compiler.MapValueForKey(m, "basePath") - if v4 != nil { - x.BasePath, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string schemes = 5; - v5 := compiler.MapValueForKey(m, "schemes") - if v5 != nil { - v, ok := v5.([]interface{}) - if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 6; - v6 := compiler.MapValueForKey(m, "consumes") - if v6 != nil { - v, ok := v6.([]interface{}) - if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // 
repeated string produces = 7; - v7 := compiler.MapValueForKey(m, "produces") - if v7 != nil { - v, ok := v7.([]interface{}) - if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Paths paths = 8; - v8 := compiler.MapValueForKey(m, "paths") - if v8 != nil { - var err error - x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) - if err != nil { - errors = append(errors, err) - } - } - // Definitions definitions = 9; - v9 := compiler.MapValueForKey(m, "definitions") - if v9 != nil { - var err error - x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // ParameterDefinitions parameters = 10; - v10 := compiler.MapValueForKey(m, "parameters") - if v10 != nil { - var err error - x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - } - // ResponseDefinitions responses = 11; - v11 := compiler.MapValueForKey(m, "responses") - if v11 != nil { - var err error - x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // SecurityDefinitions security_definitions = 13; - v13 := compiler.MapValueForKey(m, "securityDefinitions") - if v13 != nil { - var err error - x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Tag tags = 14; - v14 := compiler.MapValueForKey(m, "tags") - if v14 != nil { - // repeated Tag - x.Tags = make([]*Tag, 0) - a, ok := v14.([]interface{}) - if ok { - for _, item := range a { - y, err := NewTag(item, compiler.NewContext("tags", context)) - if err != nil { - errors = append(errors, err) - } - x.Tags = append(x.Tags, y) - } - } - } - // ExternalDocs external_docs = 15; - v15 := compiler.MapValueForKey(m, "externalDocs") - if v15 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 16; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - 
return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExamples creates an object of type Examples if possible, returning an error if not. -func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { - errors := make([]error, 0) - x := &Examples{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. -func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { - errors := make([]error, 0) - x := &ExternalDocs{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. -func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { - errors := make([]error, 0) - x := &FileSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string format = 1; - v1 := compiler.MapValueForKey(m, "format") - if v1 != nil { - x.Format, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 2; - v2 := compiler.MapValueForKey(m, "title") - if v2 != nil { - x.Title, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 4; - v4 := compiler.MapValueForKey(m, "default") - if v4 != nil { - var err error - x.Default, err = NewAny(v4, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string required = 5; - v5 := compiler.MapValueForKey(m, "required") - if v5 != nil { - v, ok := v5.([]interface{}) - if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [file] - if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 7; - v7 := compiler.MapValueForKey(m, "readOnly") - if v7 != nil { - x.ReadOnly, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 8; - v8 := compiler.MapValueForKey(m, "externalDocs") - if v8 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 9; - v9 := compiler.MapValueForKey(m, "example") - if v9 != nil { - var err error - x.Example, err = NewAny(v9, compiler.NewContext("example", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. -func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { - errors := make([]error, 0) - x := &FormDataParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [formData] - if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array file] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = v7.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = v9.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) - errors = 
append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := v16.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = v17.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := v19.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = v20.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - 
x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeader creates an object of type Header if possible, returning an error if not. -func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { - errors := make([]error, 0) - x := &Header{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 
!= nil { - x.CollectionFormat, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := v10.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = v12.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := v13.(int) - if ok { - x.MaxItems = 
int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = v15.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 18; - v18 := compiler.MapValueForKey(m, "description") - if v18 != nil { - x.Description, ok = v18.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 19; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
-func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { - errors := make([]error, 0) - x := &HeaderParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header] - if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, 
"collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = v8.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = v16.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - 
if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = v19.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaders creates an object of type Headers if possible, returning an error if not. 
-func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { - errors := make([]error, 0) - x := &Headers{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedHeader additional_properties = 1; - // MAP: Header - x.AdditionalProperties = make([]*NamedHeader, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedHeader{} - pair.Name = k - var err error - pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewInfo creates an object of type Info if possible, returning an error if not. -func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { - errors := make([]error, 0) - x := &Info{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"title", "version"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string title = 1; - v1 := compiler.MapValueForKey(m, "title") - if v1 != nil { - x.Title, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string version = 2; - v2 := compiler.MapValueForKey(m, "version") - if v2 != nil { - x.Version, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string terms_of_service = 4; - v4 := compiler.MapValueForKey(m, "termsOfService") - if v4 != nil { - x.TermsOfService, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Contact contact = 5; - v5 := compiler.MapValueForKey(m, "contact") - if v5 != nil { - var err error - x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) - if err != nil { - errors = append(errors, err) - } - } - // License license = 6; - v6 := compiler.MapValueForKey(m, "license") - if v6 != nil { - var err error - x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 7; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. -func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { - errors := make([]error, 0) - x := &ItemsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Schema = make([]*Schema, 0) - y, err := NewSchema(m, compiler.NewContext("", context)) - if err != nil { - return nil, err - } - x.Schema = append(x.Schema, y) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. -func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { - errors := make([]error, 0) - x := &JsonReference{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"$ref"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"$ref", "description"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewLicense creates an object of type License if possible, returning an error if not. 
-func NewLicense(in interface{}, context *compiler.Context) (*License, error) { - errors := make([]error, 0) - x := &License{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
-func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { - errors := make([]error, 0) - x := &NamedAny{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewAny(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. -func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { - errors := make([]error, 0) - x := &NamedHeader{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Header value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
-func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { - errors := make([]error, 0) - x := &NamedParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Parameter value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. -func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { - errors := make([]error, 0) - x := &NamedPathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PathItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
-func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { - errors := make([]error, 0) - x := &NamedResponse{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Response value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. -func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { - errors := make([]error, 0) - x := &NamedResponseValue{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ResponseValue value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
-func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { - errors := make([]error, 0) - x := &NamedSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. -func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &NamedSecurityDefinitionsItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SecurityDefinitionsItem value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
-func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { - errors := make([]error, 0) - x := &NamedString{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - x.Value, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. -func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { - errors := make([]error, 0) - x := &NamedStringArray{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"name", "value"} - var allowedPatterns []*regexp.Regexp - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // StringArray value = 2; - v2 := compiler.MapValueForKey(m, "value") - if v2 != nil { - var err error - x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) - if err != nil { - errors = append(errors, err) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
-func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { - errors := make([]error, 0) - x := &NonBodyParameter{} - matched := false - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // HeaderParameterSubSchema header_parameter_sub_schema = 1; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // QueryParameterSubSchema query_parameter_sub_schema = 3; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - // PathParameterSubSchema path_parameter_sub_schema = 4; - { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) - if matchingError == nil { - x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
-func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { - errors := make([]error, 0) - x := &Oauth2AccessCodeSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [accessCode] - if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string authorization_url = 4; - v4 := compiler.MapValueForKey(m, "authorizationUrl") - if v4 != nil { - x.AuthorizationUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string token_url = 5; - v5 := compiler.MapValueForKey(m, "tokenUrl") - if v5 != nil { - x.TokenUrl, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 6; - v6 := compiler.MapValueForKey(m, "description") - if v6 != nil { - x.Description, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 7; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := 
compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. -func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { - errors := make([]error, 0) - x := &Oauth2ApplicationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [application] - if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string token_url = 4; - v4 := compiler.MapValueForKey(m, "tokenUrl") - if v4 != nil { - x.TokenUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string 
description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. -func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { - errors := make([]error, 0) - x := &Oauth2ImplicitSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"authorizationUrl", "flow", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [implicit] - if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string authorization_url = 4; - v4 := compiler.MapValueForKey(m, "authorizationUrl") - if v4 != nil { - x.AuthorizationUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. -func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { - errors := make([]error, 0) - x := &Oauth2PasswordSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"flow", "tokenUrl", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [oauth2] - if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string flow = 2; - v2 := compiler.MapValueForKey(m, "flow") - if v2 != nil { - x.Flow, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, 
compiler.NewError(context, message)) - } - // check for valid enum values - // [password] - if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { - message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Oauth2Scopes scopes = 3; - v3 := compiler.MapValueForKey(m, "scopes") - if v3 != nil { - var err error - x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) - if err != nil { - errors = append(errors, err) - } - } - // string token_url = 4; - v4 := compiler.MapValueForKey(m, "tokenUrl") - if v4 != nil { - x.TokenUrl, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 5; - v5 := compiler.MapValueForKey(m, "description") - if v5 != nil { - x.Description, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. -func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { - errors := make([]error, 0) - x := &Oauth2Scopes{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedString additional_properties = 1; - // MAP: string - x.AdditionalProperties = make([]*NamedString, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedString{} - pair.Name = k - pair.Value = v.(string) - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOperation creates an object of type Operation if possible, returning an error if not. 
-func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { - errors := make([]error, 0) - x := &Operation{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"responses"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated string tags = 1; - v1 := compiler.MapValueForKey(m, "tags") - if v1 != nil { - v, ok := v1.([]interface{}) - if ok { - x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 4; - v4 := compiler.MapValueForKey(m, "externalDocs") - if v4 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // string operation_id = 5; - v5 := compiler.MapValueForKey(m, "operationId") - if v5 != nil { - x.OperationId, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 6; - v6 := compiler.MapValueForKey(m, "produces") - if v6 != nil { - v, ok := v6.([]interface{}) - if ok { - x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 7; - v7 := compiler.MapValueForKey(m, "consumes") - if v7 != nil { - v, ok := v7.([]interface{}) - if ok { - x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated ParametersItem parameters = 8; - v8 := compiler.MapValueForKey(m, "parameters") - if v8 != nil { - // repeated ParametersItem - x.Parameters = 
make([]*ParametersItem, 0) - a, ok := v8.([]interface{}) - if ok { - for _, item := range a { - y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // Responses responses = 9; - v9 := compiler.MapValueForKey(m, "responses") - if v9 != nil { - var err error - x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string schemes = 10; - v10 := compiler.MapValueForKey(m, "schemes") - if v10 != nil { - v, ok := v10.([]interface{}) - if ok { - x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool deprecated = 11; - v11 := compiler.MapValueForKey(m, "deprecated") - if v11 != nil { - x.Deprecated, ok = v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := v12.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // repeated NamedAny vendor_extension = 13; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameter creates an object of type Parameter if possible, returning an error if not. 
-func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { - errors := make([]error, 0) - x := &Parameter{} - matched := false - // BodyParameter body_parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) - if matchingError == nil { - x.Oneof = &Parameter_BodyParameter{BodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // NonBodyParameter non_body_parameter = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) - if matchingError == nil { - x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. -func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { - errors := make([]error, 0) - x := &ParameterDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedParameter additional_properties = 1; - // MAP: Parameter - x.AdditionalProperties = make([]*NamedParameter, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedParameter{} - pair.Name = k - var err error - pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
-func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { - errors := make([]error, 0) - x := &ParametersItem{} - matched := false - // Parameter parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) - if matchingError == nil { - x.Oneof = &ParametersItem_Parameter{Parameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) - if matchingError == nil { - x.Oneof = &ParametersItem_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathItem creates an object of type PathItem if possible, returning an error if not. -func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { - errors := make([]error, 0) - x := &PathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Operation get = 2; - v2 := compiler.MapValueForKey(m, "get") - if v2 != nil { - var err error - x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation put = 3; - v3 := compiler.MapValueForKey(m, "put") - if v3 != nil { - var err error - x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation post = 4; - v4 := compiler.MapValueForKey(m, "post") - if v4 != nil { - var err error - x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation delete = 5; - v5 := compiler.MapValueForKey(m, "delete") - if v5 != nil { - var err error - x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation options = 6; - v6 := compiler.MapValueForKey(m, "options") - if v6 != nil { - var err error - x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation head = 7; - v7 := compiler.MapValueForKey(m, "head") - if v7 != nil { - var err error 
- x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation patch = 8; - v8 := compiler.MapValueForKey(m, "patch") - if v8 != nil { - var err error - x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated ParametersItem parameters = 9; - v9 := compiler.MapValueForKey(m, "parameters") - if v9 != nil { - // repeated ParametersItem - x.Parameters = make([]*ParametersItem, 0) - a, ok := v9.([]interface{}) - if ok { - for _, item := range a { - y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. -func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { - errors := make([]error, 0) - x := &PathParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"required"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, 
compiler.NewError(context, message)) - } - // check for valid enum values - // [path] - if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = v5.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, "collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = v8.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - switch v10 := v10.(type) { - case float64: - x.Maximum = v10 - case float32: - x.Maximum = float64(v10) - case uint64: - x.Maximum = float64(v10) - case uint32: - x.Maximum = float64(v10) - case int64: - x.Maximum = float64(v10) - case int32: - x.Maximum = float64(v10) - case int: - x.Maximum = float64(v10) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = 
v11.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - switch v12 := v12.(type) { - case float64: - x.Minimum = v12 - case float32: - x.Minimum = float64(v12) - case uint64: - x.Minimum = float64(v12) - case uint32: - x.Minimum = float64(v12) - case int64: - x.Minimum = float64(v12) - case int32: - x.Minimum = float64(v12) - case int: - x.Minimum = float64(v12) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = v13.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = v16.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = v19.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - switch v21 := v21.(type) { - case float64: - x.MultipleOf = v21 - case float32: - x.MultipleOf = float64(v21) - case uint64: - x.MultipleOf = float64(v21) - case uint32: - x.MultipleOf = 
float64(v21) - case int64: - x.MultipleOf = float64(v21) - case int32: - x.MultipleOf = float64(v21) - case int: - x.MultipleOf = float64(v21) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPaths creates an object of type Paths if possible, returning an error if not. -func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { - errors := make([]error, 0) - x := &Paths{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern0, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedAny vendor_extension = 1; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - // repeated NamedPathItem path = 2; - // MAP: PathItem ^/ - x.Path = make([]*NamedPathItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "/") { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
-func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { - errors := make([]error, 0) - x := &PrimitivesItems{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.Maximum = v6 - case float32: - x.Maximum = float64(v6) - case uint64: - x.Maximum = float64(v6) - case uint32: - x.Maximum = float64(v6) - case int64: - x.Maximum = float64(v6) - case int32: - x.Maximum = float64(v6) - case int: - x.Maximum = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - 
if v7 != nil { - x.ExclusiveMaximum, ok = v7.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - switch v8 := v8.(type) { - case float64: - x.Minimum = v8 - case float32: - x.Minimum = float64(v8) - case uint64: - x.Minimum = float64(v8) - case uint32: - x.Minimum = float64(v8) - case int64: - x.Minimum = float64(v8) - case int32: - x.Minimum = float64(v8) - case int: - x.Minimum = float64(v8) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = v9.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := v10.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = v12.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := v13.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = v15.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v16.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - switch v17 := v17.(type) { - case float64: - x.MultipleOf = v17 - case float32: - x.MultipleOf = float64(v17) - case uint64: - x.MultipleOf = float64(v17) - case uint32: - 
x.MultipleOf = float64(v17) - case int64: - x.MultipleOf = float64(v17) - case int32: - x.MultipleOf = float64(v17) - case int: - x.MultipleOf = float64(v17) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 18; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { - errors := make([]error, 0) - x := &Properties{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
-func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { - errors := make([]error, 0) - x := &QueryParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = v1.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [query] - if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = v6.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = v7.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - 
} - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = v9.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - switch v11 := v11.(type) { - case float64: - x.Maximum = v11 - case float32: - x.Maximum = float64(v11) - case uint64: - x.Maximum = float64(v11) - case uint32: - x.Maximum = float64(v11) - case int64: - x.Maximum = float64(v11) - case int32: - x.Maximum = float64(v11) - case int: - x.Maximum = float64(v11) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = v12.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - switch v13 := v13.(type) { - case float64: - x.Minimum = v13 - case float32: - x.Minimum = float64(v13) - case uint64: - x.Minimum = float64(v13) - case uint32: - x.Minimum = float64(v13) - case int64: - x.Minimum = float64(v13) - case int32: - x.Minimum = float64(v13) - case int: - x.Minimum = float64(v13) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = v14.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := v16.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; 
- v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = v17.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := v19.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = v20.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v21.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - switch v22 := v22.(type) { - case float64: - x.MultipleOf = v22 - case float32: - x.MultipleOf = float64(v22) - case uint64: - x.MultipleOf = float64(v22) - case uint32: - x.MultipleOf = float64(v22) - case int64: - x.MultipleOf = float64(v22) - case int32: - x.MultipleOf = float64(v22) - case int: - x.MultipleOf = float64(v22) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponse creates an object of type Response if possible, returning an error if not. 
-func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { - errors := make([]error, 0) - x := &Response{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"description"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "examples", "headers", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaItem schema = 2; - v2 := compiler.MapValueForKey(m, "schema") - if v2 != nil { - var err error - x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // Headers headers = 3; - v3 := compiler.MapValueForKey(m, "headers") - if v3 != nil { - var err error - x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) - if err != nil { - errors = append(errors, err) - } - } - // Examples examples = 4; - v4 := compiler.MapValueForKey(m, "examples") - if v4 != nil { - var err error - x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
-func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { - errors := make([]error, 0) - x := &ResponseDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedResponse additional_properties = 1; - // MAP: Response - x.AdditionalProperties = make([]*NamedResponse, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedResponse{} - pair.Name = k - var err error - pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. -func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { - errors := make([]error, 0) - x := &ResponseValue{} - matched := false - // Response response = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewResponse(m, compiler.NewContext("response", context)) - if matchingError == nil { - x.Oneof = &ResponseValue_Response{Response: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) - if matchingError == nil { - x.Oneof = &ResponseValue_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponses creates an object of type Responses if possible, returning an error if not. 
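NewResponseValue above shows the generated oneof pattern: each candidate constructor is attempted, a failed attempt contributes a matching error, and once one alternative matches, the accumulated matching errors are discarded. A small standalone sketch of that control flow under assumed alternatives (decodeIntOrString and its two branches are illustrative, not gnostic code):

    package main

    import (
        "fmt"
        "strconv"
    )

    // decodeIntOrString tries each alternative in order, collecting the
    // errors of failed matches; if any alternative succeeds, those errors
    // are discarded, mirroring the generated oneof constructors.
    func decodeIntOrString(in string) (interface{}, error) {
        var errs []error
        matched := false
        var out interface{}

        // Alternative 1: integer.
        if n, err := strconv.Atoi(in); err == nil {
            out, matched = n, true
        } else {
            errs = append(errs, err)
        }

        // Alternative 2: quoted string.
        if !matched {
            if s, err := strconv.Unquote(in); err == nil {
                out, matched = s, true
            } else {
                errs = append(errs, err)
            }
        }

        if matched {
            // A successful match discards the errors of the alternatives
            // that did not apply.
            return out, nil
        }
        return nil, fmt.Errorf("no alternative matched %q: %v", in, errs)
    }

    func main() {
        fmt.Println(decodeIntOrString("42"))
        fmt.Println(decodeIntOrString(`"hello"`))
        fmt.Println(decodeIntOrString("oops"))
    }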
-func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { - errors := make([]error, 0) - x := &Responses{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern2, pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedResponseValue response_code = 1; - // MAP: ResponseValue ^([0-9]{3})$|^(default)$ - x.ResponseCode = make([]*NamedResponseValue, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if pattern2.MatchString(k) { - pair := &NamedResponseValue{} - pair.Name = k - var err error - pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.ResponseCode = append(x.ResponseCode, pair) - } - } - } - // repeated NamedAny vendor_extension = 2; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchema creates an object of type Schema if possible, returning an error if not. 
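NewResponses above routes map keys by regular expression: keys matching ^([0-9]{3})$|^(default)$ become response entries, keys with the x- prefix are collected as vendor extensions, and anything else is reported as invalid. A standalone sketch of that routing (routeKeys is an illustrative name, not part of the deleted file):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // responseCodePattern corresponds to the ^([0-9]{3})$|^(default)$ pattern
    // the generated NewResponses constructor uses for response keys.
    var responseCodePattern = regexp.MustCompile(`^([0-9]{3})$|^(default)$`)

    func routeKeys(m map[string]interface{}) (responses, extensions map[string]interface{}, invalid []string) {
        responses = map[string]interface{}{}
        extensions = map[string]interface{}{}
        for k, v := range m {
            switch {
            case responseCodePattern.MatchString(k):
                responses[k] = v
            case strings.HasPrefix(k, "x-"):
                extensions[k] = v
            default:
                invalid = append(invalid, k)
            }
        }
        return responses, extensions, invalid
    }

    func main() {
        r, e, bad := routeKeys(map[string]interface{}{
            "200":          "ok",
            "default":      "fallback",
            "x-rate-limit": 10,
            "banana":       true,
        })
        fmt.Println(len(r), len(e), bad)
    }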
-func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { - errors := make([]error, 0) - x := &Schema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 3; - v3 := compiler.MapValueForKey(m, "title") - if v3 != nil { - x.Title, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = v4.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float multiple_of = 6; - v6 := compiler.MapValueForKey(m, "multipleOf") - if v6 != nil { - switch v6 := v6.(type) { - case float64: - x.MultipleOf = v6 - case float32: - x.MultipleOf = float64(v6) - case uint64: - x.MultipleOf = float64(v6) - case uint32: - x.MultipleOf = float64(v6) - case int64: - x.MultipleOf = float64(v6) - case int32: - x.MultipleOf = float64(v6) - case int: - x.MultipleOf = float64(v6) - default: - message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float maximum = 7; - v7 := compiler.MapValueForKey(m, "maximum") - if v7 != nil { - switch v7 := v7.(type) { - case float64: - x.Maximum = v7 - case float32: - x.Maximum = float64(v7) - case uint64: - x.Maximum = float64(v7) - case uint32: - x.Maximum = float64(v7) - case int64: - x.Maximum = float64(v7) - case int32: - x.Maximum = float64(v7) - case int: - x.Maximum = float64(v7) - default: - message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool 
exclusive_maximum = 8; - v8 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v8 != nil { - x.ExclusiveMaximum, ok = v8.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 9; - v9 := compiler.MapValueForKey(m, "minimum") - if v9 != nil { - switch v9 := v9.(type) { - case float64: - x.Minimum = v9 - case float32: - x.Minimum = float64(v9) - case uint64: - x.Minimum = float64(v9) - case uint32: - x.Minimum = float64(v9) - case int64: - x.Minimum = float64(v9) - case int32: - x.Minimum = float64(v9) - case int: - x.Minimum = float64(v9) - default: - message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 10; - v10 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v10 != nil { - x.ExclusiveMinimum, ok = v10.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 11; - v11 := compiler.MapValueForKey(m, "maxLength") - if v11 != nil { - t, ok := v11.(int) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 12; - v12 := compiler.MapValueForKey(m, "minLength") - if v12 != nil { - t, ok := v12.(int) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 13; - v13 := compiler.MapValueForKey(m, "pattern") - if v13 != nil { - x.Pattern, ok = v13.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 14; - v14 := compiler.MapValueForKey(m, "maxItems") - if v14 != nil { - t, ok := v14.(int) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 15; - v15 := compiler.MapValueForKey(m, "minItems") - if v15 != nil { - t, ok := v15.(int) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 16; - v16 := compiler.MapValueForKey(m, "uniqueItems") - if v16 != nil { - x.UniqueItems, ok = v16.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_properties = 17; - v17 := compiler.MapValueForKey(m, "maxProperties") - if v17 != nil { - t, ok := v17.(int) - if ok { - x.MaxProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_properties = 18; - v18 := compiler.MapValueForKey(m, "minProperties") - if v18 != nil { - t, ok := v18.(int) - if ok { - x.MinProperties = int64(t) - } else { - message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string required = 19; - v19 := compiler.MapValueForKey(m, "required") - if v19 != nil { - v, ok := v19.([]interface{}) - if ok { - x.Required = compiler.ConvertInterfaceArrayToStringArray(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := v20.([]interface{}) - if ok { - for _, item := range a { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // AdditionalPropertiesItem additional_properties = 21; - v21 := compiler.MapValueForKey(m, "additionalProperties") - if v21 != nil { - var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) - if err != nil { - errors = append(errors, err) - } - } - // TypeItem type = 22; - v22 := compiler.MapValueForKey(m, "type") - if v22 != nil { - var err error - x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) - if err != nil { - errors = append(errors, err) - } - } - // ItemsItem items = 23; - v23 := compiler.MapValueForKey(m, "items") - if v23 != nil { - var err error - x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Schema all_of = 24; - v24 := compiler.MapValueForKey(m, "allOf") - if v24 != nil { - // repeated Schema - x.AllOf = make([]*Schema, 0) - a, ok := v24.([]interface{}) - if ok { - for _, item := range a { - y, err := NewSchema(item, compiler.NewContext("allOf", context)) - if err != nil { - errors = append(errors, err) - } - x.AllOf = append(x.AllOf, y) - } - } - } - // Properties properties = 25; - v25 := compiler.MapValueForKey(m, "properties") - if v25 != nil { - var err error - x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) - if err != nil { - errors = append(errors, err) - } - } - // string discriminator = 26; - v26 := compiler.MapValueForKey(m, "discriminator") - if v26 != nil { - x.Discriminator, ok = v26.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 27; - v27 := compiler.MapValueForKey(m, "readOnly") - if v27 != nil { - x.ReadOnly, ok = v27.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Xml xml = 28; - v28 := compiler.MapValueForKey(m, "xml") - if v28 != nil { - var err error - x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) - if err != nil { - errors = append(errors, err) - } - } - // ExternalDocs external_docs = 29; - v29 := compiler.MapValueForKey(m, "externalDocs") - if v29 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 30; - v30 := compiler.MapValueForKey(m, "example") - if v30 != nil { - var err error - x.Example, err = NewAny(v30, compiler.NewContext("example", context)) - 
if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 31; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. -func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { - errors := make([]error, 0) - x := &SchemaItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &SchemaItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // FileSchema file_schema = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) - if matchingError == nil { - x.Oneof = &SchemaItem_FileSchema{FileSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. -func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { - errors := make([]error, 0) - x := &SecurityDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSecurityDefinitionsItem additional_properties = 1; - // MAP: SecurityDefinitionsItem - x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedSecurityDefinitionsItem{} - pair.Name = k - var err error - pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
-func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &SecurityDefinitionsItem{} - matched := false - // BasicAuthenticationSecurity basic_authentication_security = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // ApiKeySecurity api_key_security = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ImplicitSecurity oauth2_implicit_security = 3; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2PasswordSecurity oauth2_password_security = 4; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ApplicationSecurity oauth2_application_security = 5; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
-func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { - errors := make([]error, 0) - x := &SecurityRequirement{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedStringArray additional_properties = 1; - // MAP: StringArray - x.AdditionalProperties = make([]*NamedStringArray, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedStringArray{} - pair.Name = k - var err error - pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewStringArray creates an object of type StringArray if possible, returning an error if not. -func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { - errors := make([]error, 0) - x := &StringArray{} - a, ok := in.([]interface{}) - if !ok { - message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - x.Value = make([]string, 0) - for _, s := range a { - x.Value = append(x.Value, s.(string)) - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTag creates an object of type Tag if possible, returning an error if not. -func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { - errors := make([]error, 0) - x := &Tag{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "externalDocs", "name"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 3; - v3 := compiler.MapValueForKey(m, "externalDocs") - if v3 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := 
range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. -func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { - errors := make([]error, 0) - x := &TypeItem{} - switch in := in.(type) { - case string: - x.Value = make([]string, 0) - x.Value = append(x.Value, in) - case []interface{}: - x.Value = make([]string, 0) - for _, v := range in { - value, ok := v.(string) - if ok { - x.Value = append(x.Value, value) - } else { - message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) - errors = append(errors, compiler.NewError(context, message)) - } - } - default: - message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. -func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { - errors := make([]error, 0) - x := &VendorExtension{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewXml creates an object of type Xml if possible, returning an error if not. 
-func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { - errors := make([]error, 0) - x := &Xml{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = v1.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string namespace = 2; - v2 := compiler.MapValueForKey(m, "namespace") - if v2 != nil { - x.Namespace, ok = v2.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string prefix = 3; - v3 := compiler.MapValueForKey(m, "prefix") - if v3 != nil { - x.Prefix, ok = v3.(string) - if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool attribute = 4; - v4 := compiler.MapValueForKey(m, "attribute") - if v4 != nil { - x.Attribute, ok = v4.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool wrapped = 5; - v5 := compiler.MapValueForKey(m, "wrapped") - if v5 != nil { - x.Wrapped, ok = v5.(bool) - if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for _, item := range m { - k, ok := compiler.StringValue(item.Key) - if ok { - v := item.Value - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.HandleExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes, _ := yaml.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. -func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Any objects. 
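NewXml above, like the other constructors, first rejects keys that are neither in its allowed-key list nor matched by an allowed pattern (pattern0 evidently covers the x- extension prefix). A standalone sketch of that check (invalidKeys is an illustrative name, not the gnostic compiler.InvalidKeysInMap API):

    package main

    import (
        "fmt"
        "regexp"
        "sort"
    )

    // invalidKeys reports map keys that are neither explicitly allowed nor
    // matched by one of the allowed patterns, mirroring the validation the
    // generated constructors perform before reading individual fields.
    func invalidKeys(m map[string]interface{}, allowed []string, patterns []*regexp.Regexp) []string {
        allowedSet := map[string]bool{}
        for _, k := range allowed {
            allowedSet[k] = true
        }
        var bad []string
        for k := range m {
            if allowedSet[k] {
                continue
            }
            ok := false
            for _, p := range patterns {
                if p.MatchString(k) {
                    ok = true
                    break
                }
            }
            if !ok {
                bad = append(bad, k)
            }
        }
        sort.Strings(bad)
        return bad
    }

    func main() {
        xml := map[string]interface{}{"name": "item", "wrapped": true, "x-note": "ok", "bogus": 1}
        allowed := []string{"attribute", "name", "namespace", "prefix", "wrapped"}
        fmt.Println(invalidKeys(xml, allowed, []*regexp.Regexp{regexp.MustCompile(`^x-`)}))
    }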
-func (m *Any) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ApiKeySecurity objects. -func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. -func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BodyParameter objects. -func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Default objects. -func (m *Default) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Definitions objects. -func (m *Definitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Document objects. 
-func (m *Document) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Info != nil { - _, err := m.Info.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Paths != nil { - _, err := m.Paths.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Definitions != nil { - _, err := m.Definitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Parameters != nil { - _, err := m.Parameters.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.SecurityDefinitions != nil { - _, err := m.SecurityDefinitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Tags { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Examples objects. -func (m *Examples) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FileSchema objects. -func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
-func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. -func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Headers objects. -func (m *Headers) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Info objects. -func (m *Info) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Contact != nil { - _, err := m.Contact.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.License != nil { - _, err := m.License.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ItemsItem objects. 
-func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.Schema { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside JsonReference objects. -func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewJsonReference(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedHeader objects. -func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedParameter objects. -func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponse objects. -func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponseValue objects. -func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSchema objects. 
-func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. -func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedString objects. -func (m *NamedString) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NonBodyParameter objects. -func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) - if ok { - _, err := p.HeaderParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) - if ok { - _, err := p.FormDataParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) - if ok { - _, err := p.QueryParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) - if ok { - _, err := p.PathParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. -func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. -func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
-func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. -func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2Scopes objects. -func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Operation objects. -func (m *Operation) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*Parameter_BodyParameter) - if ok { - _, err := p.BodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*Parameter_NonBodyParameter) - if ok { - _, err := p.NonBodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParameterDefinitions objects. -func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParametersItem objects. 
-func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ParametersItem_Parameter) - if ok { - _, err := p.Parameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ParametersItem_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewParametersItem(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathItem objects. -func (m *PathItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewPathItem(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Get != nil { - _, err := m.Get.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Put != nil { - _, err := m.Put.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Post != nil { - _, err := m.Post.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Delete != nil { - _, err := m.Delete.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Options != nil { - _, err := m.Options.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Head != nil { - _, err := m.Head.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Patch != nil { - _, err := m.Patch.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathParameterSubSchema objects. -func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Paths objects. 
-func (m *Paths) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.Path { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PrimitivesItems objects. -func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Properties objects. -func (m *Properties) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside QueryParameterSubSchema objects. -func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Response objects. -func (m *Response) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Headers != nil { - _, err := m.Headers.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseDefinitions objects. 
-func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseValue objects. -func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ResponseValue_Response) - if ok { - _, err := p.Response.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ResponseValue_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewResponseValue(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Responses objects. -func (m *Responses) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.ResponseCode { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Schema objects. -func (m *Schema) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewSchema(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.AdditionalProperties != nil { - _, err := m.AdditionalProperties.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Type != nil { - _, err := m.Type.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.AllOf { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Properties != nil { - _, err := m.Properties.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Xml != nil { - _, err := m.Xml.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = 
append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SchemaItem objects. -func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*SchemaItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SchemaItem_FileSchema) - if ok { - _, err := p.FileSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityDefinitions objects. -func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. -func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) - if ok { - _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) - if ok { - _, err := p.ApiKeySecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) - if ok { - _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) - if ok { - _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) - if ok { - _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) - if ok { - _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside SecurityRequirement objects. -func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside StringArray objects. -func (m *StringArray) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Tag objects. 
-func (m *Tag) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside TypeItem objects. -func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside VendorExtension objects. -func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Xml objects. -func (m *Xml) ResolveReferences(root string) (interface{}, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. -func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // AdditionalPropertiesItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return v1.Boolean - } - return nil -} - -// ToRawInfo returns a description of Any suitable for JSON or YAML export. -func (m *Any) ToRawInfo() interface{} { - var err error - var info1 []yaml.MapSlice - err = yaml.Unmarshal([]byte(m.Yaml), &info1) - if err == nil { - return info1 - } - var info2 yaml.MapSlice - err = yaml.Unmarshal([]byte(m.Yaml), &info2) - if err == nil { - return info2 - } - var info3 interface{} - err = yaml.Unmarshal([]byte(m.Yaml), &info3) - if err == nil { - return info3 - } - return nil -} - -// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. -func (m *ApiKeySecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. 
-func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. -func (m *BodyParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) - } - if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) - } - if m.Schema != nil { - info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) - } - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Contact suitable for JSON or YAML export. -func (m *Contact) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) - } - if m.Email != "" { - info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Default suitable for JSON or YAML export. -func (m *Default) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} - return info -} - -// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. -func (m *Definitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Document suitable for JSON or YAML export. 
-func (m *Document) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Swagger != "" { - info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) - } - if m.Info != nil { - info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) - } - // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Host != "" { - info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) - } - if m.BasePath != "" { - info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath}) - } - if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) - } - if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) - } - if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) - } - if m.Paths != nil { - info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) - } - // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Definitions != nil { - info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) - } - // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Parameters != nil { - info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) - } - // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Responses != nil { - info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) - } - // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Security) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Security { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "security", Value: items}) - } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.SecurityDefinitions != nil { - info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) - } - // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Tags) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Tags { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "tags", Value: items}) - } - // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Examples suitable for JSON or YAML export. 
-func (m *Examples) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. -func (m *ExternalDocs) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. -func (m *FileSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) - } - if m.Title != "" { - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Required) != 0 { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.ReadOnly != false { - info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Example != nil { - info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) - } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
-func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Header suitable for JSON or YAML export. 
-func (m *Header) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
-func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Headers suitable for JSON or YAML export. 
-func (m *Headers) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Info suitable for JSON or YAML export. -func (m *Info) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Title != "" { - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) - } - if m.Version != "" { - info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.TermsOfService != "" { - info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService}) - } - if m.Contact != nil { - info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()}) - } - // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.License != nil { - info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()}) - } - // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. -func (m *ItemsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Schema) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Schema { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "schema", Value: items}) - } - // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - return info -} - -// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. -func (m *JsonReference) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - return info -} - -// ToRawInfo returns a description of License suitable for JSON or YAML export. -func (m *License) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.Url != "" { - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. 
-func (m *NamedAny) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. -func (m *NamedHeader) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. -func (m *NamedParameter) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. -func (m *NamedPathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. -func (m *NamedResponse) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. -func (m *NamedResponseValue) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. -func (m *NamedSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. -func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. 
-func (m *NamedString) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.Value != "" { - info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) - } - return info -} - -// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. -func (m *NamedStringArray) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. -func (m *NonBodyParameter) ToRawInfo() interface{} { - // ONE OF WRAPPER - // NonBodyParameter - // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetHeaderParameterSubSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFormDataParameterSubSchema() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetQueryParameterSubSchema() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetPathParameterSubSchema() - if v3 != nil { - return v3.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. -func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) - } - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. 
-func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. -func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. -func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) - } - if m.Scopes != nil { - info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) - } - // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. 
-func (m *Oauth2Scopes) ToRawInfo() interface{} { - info := yaml.MapSlice{} - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Operation suitable for JSON or YAML export. -func (m *Operation) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Tags) != 0 { - info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) - } - if m.Summary != "" { - info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.OperationId != "" { - info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) - } - if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) - } - if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) - } - if len(m.Parameters) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "parameters", Value: items}) - } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} - if m.Responses != nil { - info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) - } - // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) - } - if m.Deprecated != false { - info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) - } - if len(m.Security) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Security { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "security", Value: items}) - } - // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. -func (m *Parameter) ToRawInfo() interface{} { - // ONE OF WRAPPER - // Parameter - // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBodyParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetNonBodyParameter() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. 
-func (m *ParameterDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. -func (m *ParametersItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // ParametersItem - // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. -func (m *PathItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) - } - if m.Get != nil { - info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) - } - // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Put != nil { - info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) - } - // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Post != nil { - info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) - } - // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Delete != nil { - info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) - } - // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Options != nil { - info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()}) - } - // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Head != nil { - info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) - } - // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Patch != nil { - info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()}) - } - // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.Parameters) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Parameters { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "parameters", Value: items}) - } - // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON 
or YAML export. -func (m *PathParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Paths suitable for JSON or YAML export. 
-func (m *Paths) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - if m.Path != nil { - for _, item := range m.Path { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. -func (m *PrimitivesItems) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Properties suitable for JSON or YAML export. 
-func (m *Properties) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. -func (m *QueryParameterSubSchema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) - } - if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) - } - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) - } - if m.Items != nil { - info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) - } - // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] 
MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Response suitable for JSON or YAML export. -func (m *Response) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Schema != nil { - info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) - } - // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Headers != nil { - info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) - } - // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Examples != nil { - info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()}) - } - // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. -func (m *ResponseDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. -func (m *ResponseValue) ToRawInfo() interface{} { - // ONE OF WRAPPER - // ResponseValue - // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetResponse() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of Responses suitable for JSON or YAML export. -func (m *Responses) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.ResponseCode != nil { - for _, item := range m.ResponseCode { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
-func (m *Schema) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) - } - if m.Format != "" { - info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) - } - if m.Title != "" { - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.Default != nil { - info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) - } - // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) - } - if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) - } - if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) - } - if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) - } - if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) - } - if m.MaxLength != 0 { - info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) - } - if m.MinLength != 0 { - info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) - } - if m.Pattern != "" { - info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) - } - if m.MaxItems != 0 { - info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) - } - if m.MinItems != 0 { - info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) - } - if m.UniqueItems != false { - info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) - } - if m.MaxProperties != 0 { - info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties}) - } - if m.MinProperties != 0 { - info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties}) - } - if len(m.Required) != 0 { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) - } - if len(m.Enum) != 0 { - items := make([]interface{}, 0) - for _, item := range m.Enum { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "enum", Value: items}) - } - // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.AdditionalProperties != nil { - info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()}) - } - // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Type != nil { - if len(m.Type.Value) == 1 { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]}) - } else { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value}) - } - } - // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Items != nil { - items := make([]interface{}, 0) - for _, item := range m.Items.Schema { - items = append(items, item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "items", Value: items[0]}) - } - // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if len(m.AllOf) != 0 { - items := make([]interface{}, 0) - for _, item := range m.AllOf { - items = append(items, 
item.ToRawInfo()) - } - info = append(info, yaml.MapItem{Key: "allOf", Value: items}) - } - // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} - if m.Properties != nil { - info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()}) - } - // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Discriminator != "" { - info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator}) - } - if m.ReadOnly != false { - info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) - } - if m.Xml != nil { - info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()}) - } - // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.Example != nil { - info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) - } - // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. -func (m *SchemaItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // SchemaItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFileSchema() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. -func (m *SecurityDefinitions) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
-func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { - // ONE OF WRAPPER - // SecurityDefinitionsItem - // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBasicAuthenticationSecurity() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetApiKeySecurity() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetOauth2ImplicitSecurity() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetOauth2PasswordSecurity() - if v3 != nil { - return v3.ToRawInfo() - } - // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v4 := m.GetOauth2ApplicationSecurity() - if v4 != nil { - return v4.ToRawInfo() - } - // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v5 := m.GetOauth2AccessCodeSecurity() - if v5 != nil { - return v5.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. -func (m *SecurityRequirement) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. -func (m *StringArray) ToRawInfo() interface{} { - return m.Value -} - -// ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) - } - if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) - } - // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. -func (m *TypeItem) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if len(m.Value) != 0 { - info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) - } - return info -} - -// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
-func (m *VendorExtension) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Xml suitable for JSON or YAML export. -func (m *Xml) ToRawInfo() interface{} { - info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.Namespace != "" { - info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) - } - if m.Prefix != "" { - info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) - } - if m.Attribute != false { - info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute}) - } - if m.Wrapped != false { - info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) - } - } - // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} - return info -} - -var ( - pattern0 = regexp.MustCompile("^x-") - pattern1 = regexp.MustCompile("^/") - pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") -) diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go deleted file mode 100644 index a030fa6..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ /dev/null @@ -1,4455 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: OpenAPIv2/OpenAPIv2.proto - -/* -Package openapi_v2 is a generated protocol buffer package. - -It is generated from these files: - OpenAPIv2/OpenAPIv2.proto - -It has these top-level messages: - AdditionalPropertiesItem - Any - ApiKeySecurity - BasicAuthenticationSecurity - BodyParameter - Contact - Default - Definitions - Document - Examples - ExternalDocs - FileSchema - FormDataParameterSubSchema - Header - HeaderParameterSubSchema - Headers - Info - ItemsItem - JsonReference - License - NamedAny - NamedHeader - NamedParameter - NamedPathItem - NamedResponse - NamedResponseValue - NamedSchema - NamedSecurityDefinitionsItem - NamedString - NamedStringArray - NonBodyParameter - Oauth2AccessCodeSecurity - Oauth2ApplicationSecurity - Oauth2ImplicitSecurity - Oauth2PasswordSecurity - Oauth2Scopes - Operation - Parameter - ParameterDefinitions - ParametersItem - PathItem - PathParameterSubSchema - Paths - PrimitivesItems - Properties - QueryParameterSubSchema - Response - ResponseDefinitions - ResponseValue - Responses - Schema - SchemaItem - SecurityDefinitions - SecurityDefinitionsItem - SecurityRequirement - StringArray - Tag - TypeItem - VendorExtension - Xml -*/ -package openapi_v2 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type AdditionalPropertiesItem struct { - // Types that are valid to be assigned to Oneof: - // *AdditionalPropertiesItem_Schema - // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } -func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } -func (*AdditionalPropertiesItem) ProtoMessage() {} -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} - -type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` -} -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` -} - -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - -func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *AdditionalPropertiesItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { - return x.Schema - } - return nil -} - -func (m *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return x.Boolean - } - return false -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ - (*AdditionalPropertiesItem_Schema)(nil), - (*AdditionalPropertiesItem_Boolean)(nil), - } -} - -func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *AdditionalPropertiesItem_Boolean: - t := uint64(0) - if x.Boolean { - t = 1 - } - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(t) - case nil: - default: - return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*AdditionalPropertiesItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &AdditionalPropertiesItem_Schema{msg} - return true, err - case 2: // oneof.boolean - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} - return true, err - default: - return false, nil - } -} - -func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { - m := 
msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: - s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *AdditionalPropertiesItem_Boolean: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += 1 - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Any struct { - Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Any) GetValue() *google_protobuf.Any { - if m != nil { - return m.Value - } - return nil -} - -func (m *Any) GetYaml() string { - if m != nil { - return m.Yaml - } - return "" -} - -type ApiKeySecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } -func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } -func (*ApiKeySecurity) ProtoMessage() {} -func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *ApiKeySecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *ApiKeySecurity) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *ApiKeySecurity) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *ApiKeySecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type BasicAuthenticationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } -func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } -func (*BasicAuthenticationSecurity) ProtoMessage() {} -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *BasicAuthenticationSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *BasicAuthenticationSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type BodyParameter struct { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
- Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *BodyParameter) Reset() { *m = BodyParameter{} } -func (m *BodyParameter) String() string { return proto.CompactTextString(m) } -func (*BodyParameter) ProtoMessage() {} -func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *BodyParameter) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *BodyParameter) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *BodyParameter) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *BodyParameter) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *BodyParameter) GetSchema() *Schema { - if m != nil { - return m.Schema - } - return nil -} - -func (m *BodyParameter) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Contact information for the owners of the API. -type Contact struct { - // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The URL pointing to the contact information. - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - // The email address of the contact person/organization. - Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Contact) Reset() { *m = Contact{} } -func (m *Contact) String() string { return proto.CompactTextString(m) } -func (*Contact) ProtoMessage() {} -func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *Contact) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Contact) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *Contact) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *Contact) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Default struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Default) Reset() { *m = Default{} } -func (m *Default) String() string { return proto.CompactTextString(m) } -func (*Default) ProtoMessage() {} -func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *Default) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. 
-type Definitions struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Definitions) Reset() { *m = Definitions{} } -func (m *Definitions) String() string { return proto.CompactTextString(m) } -func (*Definitions) ProtoMessage() {} -func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *Definitions) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Document struct { - // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` - // The host (name or ip) of the API. Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` - // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` - // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *Document) GetSwagger() string { - if m != nil { - return m.Swagger - } - return "" -} - -func (m *Document) GetInfo() *Info { - if m != nil { - return m.Info - } - return nil -} - -func (m *Document) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *Document) GetBasePath() string { - if m != nil { - return m.BasePath - } - return "" -} - -func (m *Document) GetSchemes() []string { - if m != nil { - return m.Schemes - } - return nil -} - -func (m *Document) GetConsumes() []string { - if m != nil { - return m.Consumes - } - return nil -} - -func (m *Document) GetProduces() []string { - if m != nil { - return m.Produces - } - return nil -} - -func (m *Document) GetPaths() *Paths { - if m != nil { - return m.Paths - } - return nil -} - -func (m *Document) GetDefinitions() *Definitions { - if m != nil { - return m.Definitions - } 
- return nil -} - -func (m *Document) GetParameters() *ParameterDefinitions { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *Document) GetResponses() *ResponseDefinitions { - if m != nil { - return m.Responses - } - return nil -} - -func (m *Document) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security - } - return nil -} - -func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { - if m != nil { - return m.SecurityDefinitions - } - return nil -} - -func (m *Document) GetTags() []*Tag { - if m != nil { - return m.Tags - } - return nil -} - -func (m *Document) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Document) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Examples struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Examples) Reset() { *m = Examples{} } -func (m *Examples) String() string { return proto.CompactTextString(m) } -func (*Examples) ProtoMessage() {} -func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -func (m *Examples) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// information about external documentation -type ExternalDocs struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } -func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } -func (*ExternalDocs) ProtoMessage() {} -func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -func (m *ExternalDocs) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *ExternalDocs) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *ExternalDocs) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. 
-type FileSchema struct { - Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *FileSchema) Reset() { *m = FileSchema{} } -func (m *FileSchema) String() string { return proto.CompactTextString(m) } -func (*FileSchema) ProtoMessage() {} -func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -func (m *FileSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *FileSchema) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *FileSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *FileSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *FileSchema) GetRequired() []string { - if m != nil { - return m.Required - } - return nil -} - -func (m *FileSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *FileSchema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - -func (m *FileSchema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *FileSchema) GetExample() *Any { - if m != nil { - return m.Example - } - return nil -} - -func (m *FileSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type FormDataParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. 
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } -func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*FormDataParameterSubSchema) ProtoMessage() {} -func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *FormDataParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *FormDataParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *FormDataParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *FormDataParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue - } - return false -} - -func (m *FormDataParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *FormDataParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *FormDataParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *FormDataParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *FormDataParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 
-} - -func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *FormDataParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *FormDataParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *FormDataParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *FormDataParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Header struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *Header) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Header) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *Header) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *Header) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *Header) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *Header) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *Header) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *Header) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *Header) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *Header) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *Header) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *Header) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *Header) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *Header) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Header) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *Header) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *Header) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *Header) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Header) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type HeaderParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } -func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*HeaderParameterSubSchema) ProtoMessage() {} -func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -func (m *HeaderParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *HeaderParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *HeaderParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *HeaderParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *HeaderParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *HeaderParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *HeaderParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *HeaderParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *HeaderParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *HeaderParameterSubSchema) GetMinimum() float64 { - if m != nil { - 
return m.Minimum - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *HeaderParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *HeaderParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *HeaderParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Headers struct { - AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Headers) Reset() { *m = Headers{} } -func (m *Headers) String() string { return proto.CompactTextString(m) } -func (*Headers) ProtoMessage() {} -func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -func (m *Headers) GetAdditionalProperties() []*NamedHeader { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -// General information about the API. -type Info struct { - // A unique and precise title of the API. - Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` - // A semantic version number of the API. - Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The terms of service for the API. 
- TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Info) Reset() { *m = Info{} } -func (m *Info) String() string { return proto.CompactTextString(m) } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -func (m *Info) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Info) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Info) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Info) GetTermsOfService() string { - if m != nil { - return m.TermsOfService - } - return "" -} - -func (m *Info) GetContact() *Contact { - if m != nil { - return m.Contact - } - return nil -} - -func (m *Info) GetLicense() *License { - if m != nil { - return m.License - } - return nil -} - -func (m *Info) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type ItemsItem struct { - Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` -} - -func (m *ItemsItem) Reset() { *m = ItemsItem{} } -func (m *ItemsItem) String() string { return proto.CompactTextString(m) } -func (*ItemsItem) ProtoMessage() {} -func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -func (m *ItemsItem) GetSchema() []*Schema { - if m != nil { - return m.Schema - } - return nil -} - -type JsonReference struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` -} - -func (m *JsonReference) Reset() { *m = JsonReference{} } -func (m *JsonReference) String() string { return proto.CompactTextString(m) } -func (*JsonReference) ProtoMessage() {} -func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *JsonReference) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *JsonReference) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -type License struct { - // The name of the license type. It's encouraged to use an OSI compatible license. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The URL pointing to the license. 
- Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *License) Reset() { *m = License{} } -func (m *License) String() string { return proto.CompactTextString(m) } -func (*License) ProtoMessage() {} -func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *License) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *License) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *License) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -type NamedAny struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedAny) Reset() { *m = NamedAny{} } -func (m *NamedAny) String() string { return proto.CompactTextString(m) } -func (*NamedAny) ProtoMessage() {} -func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -func (m *NamedAny) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedAny) GetValue() *Any { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -type NamedHeader struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedHeader) Reset() { *m = NamedHeader{} } -func (m *NamedHeader) String() string { return proto.CompactTextString(m) } -func (*NamedHeader) ProtoMessage() {} -func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -func (m *NamedHeader) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedHeader) GetValue() *Header { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -type NamedParameter struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedParameter) Reset() { *m = NamedParameter{} } -func (m *NamedParameter) String() string { return proto.CompactTextString(m) } -func (*NamedParameter) ProtoMessage() {} -func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *NamedParameter) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedParameter) GetValue() *Parameter { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-type NamedPathItem struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } -func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } -func (*NamedPathItem) ProtoMessage() {} -func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } - -func (m *NamedPathItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedPathItem) GetValue() *PathItem { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -type NamedResponse struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedResponse) Reset() { *m = NamedResponse{} } -func (m *NamedResponse) String() string { return proto.CompactTextString(m) } -func (*NamedResponse) ProtoMessage() {} -func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } - -func (m *NamedResponse) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedResponse) GetValue() *Response { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -type NamedResponseValue struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } -func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } -func (*NamedResponseValue) ProtoMessage() {} -func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } - -func (m *NamedResponseValue) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedResponseValue) GetValue() *ResponseValue { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -type NamedSchema struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedSchema) Reset() { *m = NamedSchema{} } -func (m *NamedSchema) String() string { return proto.CompactTextString(m) } -func (*NamedSchema) ProtoMessage() {} -func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } - -func (m *NamedSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedSchema) GetValue() *Schema { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
-type NamedSecurityDefinitionsItem struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } -func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } - -func (m *NamedSecurityDefinitionsItem) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if m != nil { - return m.Value - } - return nil -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -type NamedString struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedString) Reset() { *m = NamedString{} } -func (m *NamedString) String() string { return proto.CompactTextString(m) } -func (*NamedString) ProtoMessage() {} -func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } - -func (m *NamedString) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedString) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -type NamedStringArray struct { - // Map key - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` -} - -func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } -func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } -func (*NamedStringArray) ProtoMessage() {} -func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } - -func (m *NamedStringArray) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *NamedStringArray) GetValue() *StringArray { - if m != nil { - return m.Value - } - return nil -} - -type NonBodyParameter struct { - // Types that are valid to be assigned to Oneof: - // *NonBodyParameter_HeaderParameterSubSchema - // *NonBodyParameter_FormDataParameterSubSchema - // *NonBodyParameter_QueryParameterSubSchema - // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } -func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } -func (*NonBodyParameter) ProtoMessage() {} -func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } - -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() -} - -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` -} -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` -} -type 
NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` -} -type NonBodyParameter_PathParameterSubSchema struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` -} - -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { - return x.HeaderParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { - return x.FormDataParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { - return x.QueryParameterSubSchema - } - return nil -} - -func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { - return x.PathParameterSubSchema - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ - (*NonBodyParameter_HeaderParameterSubSchema)(nil), - (*NonBodyParameter_FormDataParameterSubSchema)(nil), - (*NonBodyParameter_QueryParameterSubSchema)(nil), - (*NonBodyParameter_PathParameterSubSchema)(nil), - } -} - -func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*NonBodyParameter) - // oneof - switch x := m.Oneof.(type) { - case *NonBodyParameter_HeaderParameterSubSchema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_FormDataParameterSubSchema: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_QueryParameterSubSchema: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { - return err - } - case *NonBodyParameter_PathParameterSubSchema: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) - } - return nil -} - -func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*NonBodyParameter) - switch tag { - case 1: // oneof.header_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(HeaderParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} - return true, err - case 2: // oneof.form_data_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(FormDataParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} - return true, err - case 3: // oneof.query_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(QueryParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} - return true, err - case 4: // oneof.path_parameter_sub_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PathParameterSubSchema) - err := b.DecodeMessage(msg) - m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} - return true, err - default: - return false, nil - } -} - -func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { - m := msg.(*NonBodyParameter) - // oneof - switch x := m.Oneof.(type) { - case *NonBodyParameter_HeaderParameterSubSchema: - s := proto.Size(x.HeaderParameterSubSchema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *NonBodyParameter_FormDataParameterSubSchema: - s := proto.Size(x.FormDataParameterSubSchema) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *NonBodyParameter_QueryParameterSubSchema: - s := proto.Size(x.QueryParameterSubSchema) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case 
*NonBodyParameter_PathParameterSubSchema: - s := proto.Size(x.PathParameterSubSchema) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Oauth2AccessCodeSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } -func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } - -func (m *Oauth2AccessCodeSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2ApplicationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } -func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ApplicationSecurity) ProtoMessage() {} -func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } - -func (m *Oauth2ApplicationSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} 
- -func (m *Oauth2ApplicationSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2ImplicitSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } -func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2ImplicitSecurity) ProtoMessage() {} -func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } - -func (m *Oauth2ImplicitSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if m != nil { - return m.AuthorizationUrl - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2PasswordSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } -func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2PasswordSecurity) ProtoMessage() {} -func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } - -func (m *Oauth2PasswordSecurity) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetFlow() string { - if m != nil { - return m.Flow - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if m != nil { - return m.Scopes - } - return nil -} - -func (m *Oauth2PasswordSecurity) GetTokenUrl() string { - if m != nil { - return m.TokenUrl - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Oauth2Scopes struct { - 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } -func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } -func (*Oauth2Scopes) ProtoMessage() {} -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } - -func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Operation struct { - Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` - // A brief summary of the operation. - Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - // A longer description of the operation, GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` - // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` - // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} -func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } - -func (m *Operation) GetTags() []string { - if m != nil { - return m.Tags - } - return nil -} - -func (m *Operation) GetSummary() string { - if m != nil { - return m.Summary - } - return "" -} - -func (m *Operation) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Operation) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Operation) GetOperationId() string { - if m != nil { - return m.OperationId - } - return "" -} - -func (m *Operation) GetProduces() []string { - if m != nil { - return m.Produces - } - return nil -} - -func (m *Operation) GetConsumes() []string { - if m != nil { - return m.Consumes - } - return nil -} - -func (m *Operation) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *Operation) GetResponses() *Responses { - if m != nil { - return m.Responses - } - return nil -} - -func (m *Operation) GetSchemes() []string { - if m != nil { - return m.Schemes - } - return nil -} - -func (m *Operation) GetDeprecated() bool { - if m != nil { - 
return m.Deprecated - } - return false -} - -func (m *Operation) GetSecurity() []*SecurityRequirement { - if m != nil { - return m.Security - } - return nil -} - -func (m *Operation) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Parameter struct { - // Types that are valid to be assigned to Oneof: - // *Parameter_BodyParameter - // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (m *Parameter) Reset() { *m = Parameter{} } -func (m *Parameter) String() string { return proto.CompactTextString(m) } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } - -type isParameter_Oneof interface { - isParameter_Oneof() -} - -type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` -} -type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` -} - -func (*Parameter_BodyParameter) isParameter_Oneof() {} -func (*Parameter_NonBodyParameter) isParameter_Oneof() {} - -func (m *Parameter) GetOneof() isParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *Parameter) GetBodyParameter() *BodyParameter { - if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { - return x.BodyParameter - } - return nil -} - -func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { - if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { - return x.NonBodyParameter - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ - (*Parameter_BodyParameter)(nil), - (*Parameter_NonBodyParameter)(nil), - } -} - -func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BodyParameter); err != nil { - return err - } - case *Parameter_NonBodyParameter: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.NonBodyParameter); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) - } - return nil -} - -func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Parameter) - switch tag { - case 1: // oneof.body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_BodyParameter{msg} - return true, err - case 2: // oneof.non_body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(NonBodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_NonBodyParameter{msg} - return true, err - default: - return false, nil - } -} - -func _Parameter_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - s := proto.Size(x.BodyParameter) - n += proto.SizeVarint(1<<3 | 
proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Parameter_NonBodyParameter: - s := proto.Size(x.NonBodyParameter) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// One or more JSON representations for parameters -type ParameterDefinitions struct { - AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } -func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } -func (*ParameterDefinitions) ProtoMessage() {} -func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } - -func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type ParametersItem struct { - // Types that are valid to be assigned to Oneof: - // *ParametersItem_Parameter - // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *ParametersItem) Reset() { *m = ParametersItem{} } -func (m *ParametersItem) String() string { return proto.CompactTextString(m) } -func (*ParametersItem) ProtoMessage() {} -func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } - -type isParametersItem_Oneof interface { - isParametersItem_Oneof() -} - -type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` -} -type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` -} - -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} -func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} - -func (m *ParametersItem) GetOneof() isParametersItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *ParametersItem) GetParameter() *Parameter { - if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { - return x.Parameter - } - return nil -} - -func (m *ParametersItem) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { - return x.JsonReference - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ - (*ParametersItem_Parameter)(nil), - (*ParametersItem_JsonReference)(nil), - } -} - -func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Parameter); err != nil { - return err - } - case *ParametersItem_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ParametersItem) - switch tag { - case 1: // oneof.parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Parameter) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_Parameter{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ParametersItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - s := proto.Size(x.Parameter) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ParametersItem_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type PathItem struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` - // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PathItem) Reset() { *m = PathItem{} } -func (m *PathItem) String() string { return proto.CompactTextString(m) } -func (*PathItem) ProtoMessage() {} -func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } - -func (m *PathItem) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *PathItem) GetGet() *Operation { - if m != nil { - return m.Get - } - return nil -} - -func (m *PathItem) GetPut() *Operation { - if m != nil { - return m.Put - } - return nil -} - -func (m *PathItem) GetPost() *Operation { - if m != nil { - return m.Post - } - return nil -} - -func (m *PathItem) GetDelete() *Operation { - if m != nil { - return m.Delete - } - return nil -} - -func (m *PathItem) GetOptions() *Operation { - if m != nil { - return m.Options - } - return nil -} - -func (m *PathItem) GetHead() *Operation { - if m != nil { - return m.Head - } - return nil -} - -func (m *PathItem) GetPatch() *Operation { - if m != nil { - return m.Patch - } - return nil -} - -func (m *PathItem) GetParameters() []*ParametersItem { - if m != nil { - return m.Parameters - } - return nil -} - -func (m *PathItem) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type PathParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } -func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*PathParameterSubSchema) ProtoMessage() {} -func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } - -func (m *PathParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *PathParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *PathParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PathParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PathParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PathParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *PathParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *PathParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *PathParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *PathParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - 
-func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *PathParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *PathParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *PathParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *PathParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *PathParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *PathParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *PathParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *PathParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -type Paths struct { - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` -} - -func (m *Paths) Reset() { *m = Paths{} } -func (m *Paths) String() string { return proto.CompactTextString(m) } -func (*Paths) ProtoMessage() {} -func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } - -func (m *Paths) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func (m *Paths) GetPath() []*NamedPathItem { - if m != nil { - return m.Path - } - return nil -} - -type PrimitivesItems struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } -func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } -func (*PrimitivesItems) ProtoMessage() {} -func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } - -func (m *PrimitivesItems) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PrimitivesItems) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *PrimitivesItems) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *PrimitivesItems) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - -func (m *PrimitivesItems) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *PrimitivesItems) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *PrimitivesItems) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *PrimitivesItems) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *PrimitivesItems) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *PrimitivesItems) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *PrimitivesItems) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *PrimitivesItems) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *PrimitivesItems) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *PrimitivesItems) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *PrimitivesItems) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *PrimitivesItems) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *PrimitivesItems) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Properties struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *Properties) Reset() { *m = Properties{} } -func (m *Properties) String() string { return proto.CompactTextString(m) } -func (*Properties) ProtoMessage() {} -func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } - -func (m *Properties) GetAdditionalProperties() []*NamedSchema { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type QueryParameterSubSchema struct { - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
- Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } -func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*QueryParameterSubSchema) ProtoMessage() {} -func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } - -func (m *QueryParameterSubSchema) GetRequired() bool { - if m != nil { - return m.Required - } - return false -} - -func (m *QueryParameterSubSchema) GetIn() string { - if m != nil { - return m.In - } - return "" -} - -func (m *QueryParameterSubSchema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *QueryParameterSubSchema) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { - if m != nil { - return m.AllowEmptyValue - } - return false -} - -func (m *QueryParameterSubSchema) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *QueryParameterSubSchema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { - if m != nil { - return m.Items - } - return nil -} - -func (m *QueryParameterSubSchema) GetCollectionFormat() string { - if m != nil { - return m.CollectionFormat - } - return "" -} - 
-func (m *QueryParameterSubSchema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *QueryParameterSubSchema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *QueryParameterSubSchema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *QueryParameterSubSchema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *QueryParameterSubSchema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *QueryParameterSubSchema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *QueryParameterSubSchema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *QueryParameterSubSchema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *QueryParameterSubSchema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *QueryParameterSubSchema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *QueryParameterSubSchema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type Response struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } - -func (m *Response) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Response) GetSchema() *SchemaItem { - if m != nil { - return m.Schema - } - return nil -} - -func (m *Response) GetHeaders() *Headers { - if m != nil { - return m.Headers - } - return nil -} - -func (m *Response) GetExamples() *Examples { - if m != nil { - return m.Examples - } - return nil -} - -func (m *Response) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// One or more JSON representations for parameters -type ResponseDefinitions struct { - AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } -func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } -func (*ResponseDefinitions) ProtoMessage() {} -func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } - -func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type ResponseValue struct { - // Types that are valid to be assigned to Oneof: - // *ResponseValue_Response - // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` -} - -func (m *ResponseValue) Reset() { *m = ResponseValue{} } -func (m *ResponseValue) String() string { return proto.CompactTextString(m) } -func (*ResponseValue) ProtoMessage() {} -func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } - -type isResponseValue_Oneof interface { - isResponseValue_Oneof() -} - -type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` -} -type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` -} - -func (*ResponseValue_Response) isResponseValue_Oneof() {} -func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} - -func (m *ResponseValue) GetOneof() isResponseValue_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *ResponseValue) GetResponse() *Response { - if x, ok := m.GetOneof().(*ResponseValue_Response); ok { - return x.Response - } - return nil -} - -func (m *ResponseValue) GetJsonReference() *JsonReference { - if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { - return x.JsonReference - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } -} - -func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Response); err != nil { - return err - } - case *ResponseValue_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) - } - return nil -} - -func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ResponseValue) - switch tag { - case 1: // oneof.response - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Response) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_Response{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ResponseValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - s := proto.Size(x.Response) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseValue_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += 
proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Response objects names can either be any valid HTTP status code or 'default'. -type Responses struct { - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Responses) Reset() { *m = Responses{} } -func (m *Responses) String() string { return proto.CompactTextString(m) } -func (*Responses) ProtoMessage() {} -func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } - -func (m *Responses) GetResponseCode() []*NamedResponseValue { - if m != nil { - return m.ResponseCode - } - return nil -} - -func (m *Responses) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. -type Schema struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` - MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` - Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` - Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` - Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` - AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` - Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` - Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } - -func (m *Schema) GetXRef() string { - if m != nil { - return m.XRef - } - return "" -} - -func (m *Schema) GetFormat() string { - if m != nil { - return m.Format - } - return "" -} - -func (m *Schema) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *Schema) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Schema) GetDefault() *Any { - if m != nil { - return m.Default - } - return nil -} - -func (m *Schema) GetMultipleOf() float64 { - if m != nil { - return m.MultipleOf - } - return 0 -} - -func (m *Schema) GetMaximum() float64 { - if m != nil { - return m.Maximum - } - return 0 -} - -func (m *Schema) GetExclusiveMaximum() bool { - if m != nil { - return m.ExclusiveMaximum - } - return false -} - -func (m *Schema) GetMinimum() float64 { - if m != nil { - return m.Minimum - } - return 0 -} - -func (m *Schema) GetExclusiveMinimum() bool { - if m != nil { - return m.ExclusiveMinimum - } - return false -} - -func (m *Schema) GetMaxLength() int64 { - if m != nil { - return m.MaxLength - } - return 0 -} - -func (m *Schema) GetMinLength() int64 { - if m != nil { - return m.MinLength - } - return 0 -} - -func (m *Schema) GetPattern() string { - if m != nil { - return m.Pattern - } - return "" -} - -func (m *Schema) GetMaxItems() int64 { - if m != nil { - return m.MaxItems - } - return 0 -} - -func (m *Schema) GetMinItems() int64 { - if m != nil { - return m.MinItems - } - return 0 -} - -func (m *Schema) GetUniqueItems() bool { - if m != nil { - return m.UniqueItems - } - return false -} - -func (m *Schema) GetMaxProperties() int64 { - if m != nil { - return m.MaxProperties - } - return 0 -} - -func (m *Schema) GetMinProperties() int64 { - if m != nil { - return m.MinProperties - } - return 0 -} - -func (m *Schema) GetRequired() []string { - if m != nil { - return m.Required - } - return nil -} - -func (m *Schema) GetEnum() []*Any { - if m != nil { - return m.Enum - } - return nil -} - -func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -func (m *Schema) GetType() *TypeItem { - if m != nil { - return m.Type - } - return nil -} - -func (m *Schema) GetItems() *ItemsItem { - if m != nil { - return m.Items - } - return nil -} - -func (m *Schema) GetAllOf() []*Schema { - if m != nil { - return m.AllOf - } - return nil -} - -func (m *Schema) GetProperties() *Properties { - if m != nil { - return m.Properties - } - return nil -} - -func (m *Schema) GetDiscriminator() string { - if m != nil { - return m.Discriminator - } - return "" -} - -func (m *Schema) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - 
-func (m *Schema) GetXml() *Xml { - if m != nil { - return m.Xml - } - return nil -} - -func (m *Schema) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Schema) GetExample() *Any { - if m != nil { - return m.Example - } - return nil -} - -func (m *Schema) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type SchemaItem struct { - // Types that are valid to be assigned to Oneof: - // *SchemaItem_Schema - // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *SchemaItem) Reset() { *m = SchemaItem{} } -func (m *SchemaItem) String() string { return proto.CompactTextString(m) } -func (*SchemaItem) ProtoMessage() {} -func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } - -type isSchemaItem_Oneof interface { - isSchemaItem_Oneof() -} - -type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` -} -type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` -} - -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} -func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} - -func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *SchemaItem) GetSchema() *Schema { - if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { - return x.Schema - } - return nil -} - -func (m *SchemaItem) GetFileSchema() *FileSchema { - if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { - return x.FileSchema - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } -} - -func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *SchemaItem_FileSchema: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.FileSchema); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SchemaItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_Schema{msg} - return true, err - case 2: // oneof.file_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(FileSchema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_FileSchema{msg} - return true, err - default: - return false, nil - } -} - -func _SchemaItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case 
*SchemaItem_FileSchema: - s := proto.Size(x.FileSchema) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type SecurityDefinitions struct { - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } -func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitions) ProtoMessage() {} -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } - -func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type SecurityDefinitionsItem struct { - // Types that are valid to be assigned to Oneof: - // *SecurityDefinitionsItem_BasicAuthenticationSecurity - // *SecurityDefinitionsItem_ApiKeySecurity - // *SecurityDefinitionsItem_Oauth2ImplicitSecurity - // *SecurityDefinitionsItem_Oauth2PasswordSecurity - // *SecurityDefinitionsItem_Oauth2ApplicationSecurity - // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` -} - -func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } -func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitionsItem) ProtoMessage() {} -func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } - -type isSecurityDefinitionsItem_Oneof interface { - isSecurityDefinitionsItem_Oneof() -} - -type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { - BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` -} -type SecurityDefinitionsItem_ApiKeySecurity struct { - ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { - Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { - Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { - Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` -} -type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { - Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` -} - -func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} -func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { - return x.BasicAuthenticationSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { - return x.ApiKeySecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { - return x.Oauth2ImplicitSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { - return x.Oauth2PasswordSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { - return x.Oauth2ApplicationSecurity - } - return nil -} - -func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { - if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { - return x.Oauth2AccessCodeSecurity - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), - } -} - -func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_ApiKeySecurity: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - b.EncodeVarint(6<<3 | 
proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SecurityDefinitionsItem) - switch tag { - case 1: // oneof.basic_authentication_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BasicAuthenticationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} - return true, err - case 2: // oneof.api_key_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ApiKeySecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} - return true, err - case 3: // oneof.oauth2_implicit_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ImplicitSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} - return true, err - case 4: // oneof.oauth2_password_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2PasswordSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} - return true, err - case 5: // oneof.oauth2_application_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ApplicationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} - return true, err - case 6: // oneof.oauth2_access_code_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2AccessCodeSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} - return true, err - default: - return false, nil - } -} - -func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - s := proto.Size(x.BasicAuthenticationSecurity) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_ApiKeySecurity: - s := proto.Size(x.ApiKeySecurity) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - s := proto.Size(x.Oauth2ImplicitSecurity) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - s := proto.Size(x.Oauth2PasswordSecurity) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - s := proto.Size(x.Oauth2ApplicationSecurity) - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - s := proto.Size(x.Oauth2AccessCodeSecurity) - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type SecurityRequirement struct { - 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } -func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } -func (*SecurityRequirement) ProtoMessage() {} -func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } - -func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type StringArray struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` -} - -func (m *StringArray) Reset() { *m = StringArray{} } -func (m *StringArray) String() string { return proto.CompactTextString(m) } -func (*StringArray) ProtoMessage() {} -func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } - -func (m *StringArray) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Tag struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} -func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } - -func (m *Tag) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Tag) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Tag) GetExternalDocs() *ExternalDocs { - if m != nil { - return m.ExternalDocs - } - return nil -} - -func (m *Tag) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -type TypeItem struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` -} - -func (m *TypeItem) Reset() { *m = TypeItem{} } -func (m *TypeItem) String() string { return proto.CompactTextString(m) } -func (*TypeItem) ProtoMessage() {} -func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } - -func (m *TypeItem) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -// Any property starting with x- is valid. 
-type VendorExtension struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` -} - -func (m *VendorExtension) Reset() { *m = VendorExtension{} } -func (m *VendorExtension) String() string { return proto.CompactTextString(m) } -func (*VendorExtension) ProtoMessage() {} -func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } - -func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { - if m != nil { - return m.AdditionalProperties - } - return nil -} - -type Xml struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Xml) Reset() { *m = Xml{} } -func (m *Xml) String() string { return proto.CompactTextString(m) } -func (*Xml) ProtoMessage() {} -func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } - -func (m *Xml) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Xml) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *Xml) GetPrefix() string { - if m != nil { - return m.Prefix - } - return "" -} - -func (m *Xml) GetAttribute() bool { - if m != nil { - return m.Attribute - } - return false -} - -func (m *Xml) GetWrapped() bool { - if m != nil { - return m.Wrapped - } - return false -} - -func (m *Xml) GetVendorExtension() []*NamedAny { - if m != nil { - return m.VendorExtension - } - return nil -} - -func init() { - proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") - proto.RegisterType((*Any)(nil), "openapi.v2.Any") - proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") - proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") - proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") - proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") - proto.RegisterType((*Default)(nil), "openapi.v2.Default") - proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") - proto.RegisterType((*Document)(nil), "openapi.v2.Document") - proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") - proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") - proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") - proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") - proto.RegisterType((*Header)(nil), "openapi.v2.Header") - proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") - proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") - proto.RegisterType((*Info)(nil), "openapi.v2.Info") - proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") - proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") - proto.RegisterType((*License)(nil), "openapi.v2.License") - proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") - proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") - proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") - proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") - proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") - proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") - proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") - proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") - proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") - proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") - proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") - proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") - proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") - proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") - proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") - proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") - proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") - proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") - proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") - proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") - proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") - proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") - proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") - proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") - proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") - proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") - proto.RegisterType((*Response)(nil), "openapi.v2.Response") - proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") - proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") - proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") - proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") - proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") - proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") - proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") - proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") - proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") - proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") - proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") - proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") - proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") -} - -func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 3129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, - 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, - 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, - 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, - 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, - 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, - 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, - 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, - 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, - 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, - 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, - 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, - 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, - 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, - 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, - 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, - 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, - 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, - 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, - 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, - 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, - 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, - 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, - 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, - 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, - 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, - 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, - 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, - 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, - 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, - 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, - 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, - 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, - 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, - 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, - 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, - 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, - 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, - 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, - 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, - 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, - 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, - 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, - 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, - 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, - 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, - 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, - 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, - 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, - 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, - 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, - 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, - 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, - 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, - 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, - 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, - 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, - 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, - 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, - 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, - 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, - 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, - 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, - 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, - 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, - 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, - 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, - 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, - 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, - 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, - 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, - 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, - 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, - 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, - 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, - 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, - 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, - 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, - 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, - 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, - 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, - 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, - 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, - 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, - 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, - 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, - 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, - 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, - 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, - 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, - 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, - 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, - 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, - 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, - 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, - 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, - 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, - 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, - 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, - 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, - 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, - 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, - 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, - 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, - 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, - 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, - 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, - 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, - 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, - 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, - 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, - 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, - 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, - 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, - 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, - 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, - 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, - 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, - 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, - 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, - 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, - 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, - 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, - 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, - 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, - 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, - 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, - 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, - 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, - 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, - 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, - 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, - 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, - 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, - 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, - 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, - 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, - 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, - 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, - 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, - 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, - 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, - 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, - 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, - 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, - 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, - 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, - 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, - 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, - 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, - 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, - 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, - 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, - 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, - 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, - 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, - 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, - 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, - 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, - 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, - 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, - 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, - 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, - 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, - 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, - 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, - 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, - 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, - 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, - 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, - 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, - 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, - 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, - 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, - 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, - 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, - 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, - 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, - 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, - 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, - 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, - 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, - 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, - 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, - 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, - 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, - 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, - 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, - 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, - 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, - 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, - 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, - 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, - 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, - 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, - 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto deleted file mode 100644 index 557c880..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -syntax = "proto3"; - -package openapi.v2; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v2"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -message AdditionalPropertiesItem { - oneof oneof { - Schema schema = 1; - bool boolean = 2; - } -} - -message Any { - google.protobuf.Any value = 1; - string yaml = 2; -} - -message ApiKeySecurity { - string type = 1; - string name = 2; - string in = 3; - string description = 4; - repeated NamedAny vendor_extension = 5; -} - -message BasicAuthenticationSecurity { - string type = 1; - string description = 2; - repeated NamedAny vendor_extension = 3; -} - -message BodyParameter { - // A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 1; - // The name of the parameter. - string name = 2; - // Determines the location of the parameter. - string in = 3; - // Determines whether or not this parameter is required or optional. - bool required = 4; - Schema schema = 5; - repeated NamedAny vendor_extension = 6; -} - -// Contact information for the owners of the API. -message Contact { - // The identifying name of the contact person/organization. - string name = 1; - // The URL pointing to the contact information. - string url = 2; - // The email address of the contact person/organization. - string email = 3; - repeated NamedAny vendor_extension = 4; -} - -message Default { - repeated NamedAny additional_properties = 1; -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. -message Definitions { - repeated NamedSchema additional_properties = 1; -} - -message Document { - // The Swagger version of this document. - string swagger = 1; - Info info = 2; - // The host (name or ip) of the API. Example: 'swagger.io' - string host = 3; - // The base path to the API. Example: '/api'. - string base_path = 4; - // The transfer protocol of the API. - repeated string schemes = 5; - // A list of MIME types accepted by the API. - repeated string consumes = 6; - // A list of MIME types the API can produce. - repeated string produces = 7; - Paths paths = 8; - Definitions definitions = 9; - ParameterDefinitions parameters = 10; - ResponseDefinitions responses = 11; - repeated SecurityRequirement security = 12; - SecurityDefinitions security_definitions = 13; - repeated Tag tags = 14; - ExternalDocs external_docs = 15; - repeated NamedAny vendor_extension = 16; -} - -message Examples { - repeated NamedAny additional_properties = 1; -} - -// information about external documentation -message ExternalDocs { - string description = 1; - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// A deterministic version of a JSON Schema object. -message FileSchema { - string format = 1; - string title = 2; - string description = 3; - Any default = 4; - repeated string required = 5; - string type = 6; - bool read_only = 7; - ExternalDocs external_docs = 8; - Any example = 9; - repeated NamedAny vendor_extension = 10; -} - -message FormDataParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Header { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - string description = 18; - repeated NamedAny vendor_extension = 19; -} - -message HeaderParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -message Headers { - repeated NamedHeader additional_properties = 1; -} - -// General information about the API. -message Info { - // A unique and precise title of the API. - string title = 1; - // A semantic version number of the API. - string version = 2; - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - string description = 3; - // The terms of service for the API. - string terms_of_service = 4; - Contact contact = 5; - License license = 6; - repeated NamedAny vendor_extension = 7; -} - -message ItemsItem { - repeated Schema schema = 1; -} - -message JsonReference { - string _ref = 1; - string description = 2; -} - -message License { - // The name of the license type. It's encouraged to use an OSI compatible license. - string name = 1; - // The URL pointing to the license. - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -message NamedAny { - // Map key - string name = 1; - // Mapped value - Any value = 2; -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -message NamedHeader { - // Map key - string name = 1; - // Mapped value - Header value = 2; -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -message NamedParameter { - // Map key - string name = 1; - // Mapped value - Parameter value = 2; -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-message NamedPathItem { - // Map key - string name = 1; - // Mapped value - PathItem value = 2; -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -message NamedResponse { - // Map key - string name = 1; - // Mapped value - Response value = 2; -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -message NamedResponseValue { - // Map key - string name = 1; - // Mapped value - ResponseValue value = 2; -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -message NamedSchema { - // Map key - string name = 1; - // Mapped value - Schema value = 2; -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. -message NamedSecurityDefinitionsItem { - // Map key - string name = 1; - // Mapped value - SecurityDefinitionsItem value = 2; -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -message NamedString { - // Map key - string name = 1; - // Mapped value - string value = 2; -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -message NamedStringArray { - // Map key - string name = 1; - // Mapped value - StringArray value = 2; -} - -message NonBodyParameter { - oneof oneof { - HeaderParameterSubSchema header_parameter_sub_schema = 1; - FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - QueryParameterSubSchema query_parameter_sub_schema = 3; - PathParameterSubSchema path_parameter_sub_schema = 4; - } -} - -message Oauth2AccessCodeSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string token_url = 5; - string description = 6; - repeated NamedAny vendor_extension = 7; -} - -message Oauth2ApplicationSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2ImplicitSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2PasswordSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2Scopes { - repeated NamedString additional_properties = 1; -} - -message Operation { - repeated string tags = 1; - // A brief summary of the operation. - string summary = 2; - // A longer description of the operation, GitHub Flavored Markdown is allowed. - string description = 3; - ExternalDocs external_docs = 4; - // A unique identifier of the operation. - string operation_id = 5; - // A list of MIME types the API can produce. - repeated string produces = 6; - // A list of MIME types the API can consume. - repeated string consumes = 7; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 8; - Responses responses = 9; - // The transfer protocol of the API. 
- repeated string schemes = 10; - bool deprecated = 11; - repeated SecurityRequirement security = 12; - repeated NamedAny vendor_extension = 13; -} - -message Parameter { - oneof oneof { - BodyParameter body_parameter = 1; - NonBodyParameter non_body_parameter = 2; - } -} - -// One or more JSON representations for parameters -message ParameterDefinitions { - repeated NamedParameter additional_properties = 1; -} - -message ParametersItem { - oneof oneof { - Parameter parameter = 1; - JsonReference json_reference = 2; - } -} - -message PathItem { - string _ref = 1; - Operation get = 2; - Operation put = 3; - Operation post = 4; - Operation delete = 5; - Operation options = 6; - Operation head = 7; - Operation patch = 8; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 9; - repeated NamedAny vendor_extension = 10; -} - -message PathParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -message Paths { - repeated NamedAny vendor_extension = 1; - repeated NamedPathItem path = 2; -} - -message PrimitivesItems { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - repeated NamedAny vendor_extension = 18; -} - -message Properties { - repeated NamedSchema additional_properties = 1; -} - -message QueryParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Response { - string description = 1; - SchemaItem schema = 2; - Headers headers = 3; - Examples examples = 4; - repeated NamedAny vendor_extension = 5; -} - -// One or more JSON representations for parameters -message ResponseDefinitions { - repeated NamedResponse additional_properties = 1; -} - -message ResponseValue { - oneof oneof { - Response response = 1; - JsonReference json_reference = 2; - } -} - -// Response objects names can either be any valid HTTP status code or 'default'. -message Responses { - repeated NamedResponseValue response_code = 1; - repeated NamedAny vendor_extension = 2; -} - -// A deterministic version of a JSON Schema object. -message Schema { - string _ref = 1; - string format = 2; - string title = 3; - string description = 4; - Any default = 5; - double multiple_of = 6; - double maximum = 7; - bool exclusive_maximum = 8; - double minimum = 9; - bool exclusive_minimum = 10; - int64 max_length = 11; - int64 min_length = 12; - string pattern = 13; - int64 max_items = 14; - int64 min_items = 15; - bool unique_items = 16; - int64 max_properties = 17; - int64 min_properties = 18; - repeated string required = 19; - repeated Any enum = 20; - AdditionalPropertiesItem additional_properties = 21; - TypeItem type = 22; - ItemsItem items = 23; - repeated Schema all_of = 24; - Properties properties = 25; - string discriminator = 26; - bool read_only = 27; - Xml xml = 28; - ExternalDocs external_docs = 29; - Any example = 30; - repeated NamedAny vendor_extension = 31; -} - -message SchemaItem { - oneof oneof { - Schema schema = 1; - FileSchema file_schema = 2; - } -} - -message SecurityDefinitions { - repeated NamedSecurityDefinitionsItem additional_properties = 1; -} - -message SecurityDefinitionsItem { - oneof oneof { - BasicAuthenticationSecurity basic_authentication_security = 1; - ApiKeySecurity api_key_security = 2; - Oauth2ImplicitSecurity oauth2_implicit_security = 3; - Oauth2PasswordSecurity oauth2_password_security = 4; - Oauth2ApplicationSecurity oauth2_application_security = 5; - Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - } -} - -message SecurityRequirement { - repeated NamedStringArray additional_properties = 1; -} - -message StringArray { - repeated string value = 1; -} - -message Tag { - string name = 1; - string description = 2; - ExternalDocs external_docs = 3; - repeated NamedAny vendor_extension = 4; -} - -message TypeItem { - repeated string value = 1; -} - -// Any property starting with x- is valid. 
-message VendorExtension { - repeated NamedAny additional_properties = 1; -} - -message Xml { - string name = 1; - string namespace = 2; - string prefix = 3; - bool attribute = 4; - bool wrapped = 5; - repeated NamedAny vendor_extension = 6; -} - diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md deleted file mode 100644 index 836fb32..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# OpenAPI v2 Protocol Buffer Models - -This directory contains a Protocol Buffer-language model -and related code for supporting OpenAPI v2. - -Gnostic applications and plugins can use OpenAPIv2.proto -to generate Protocol Buffer support code for their preferred languages. - -OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI -descriptions into the Protocol Buffer-based datastructures -generated from OpenAPIv2.proto. - -OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic -compiler generator, and OpenAPIv2.pb.go is generated by -protoc, the Protocol Buffer compiler, and protoc-gen-go, the -Protocol Buffer Go code generation plugin. diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json deleted file mode 100644 index 2815a26..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json +++ /dev/null @@ -1,1610 +0,0 @@ -{ - "title": "A JSON Schema for Swagger 2.0 API.", - "id": "http://swagger.io/v2/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "required": [ - "swagger", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "swagger": { - "type": "string", - "enum": [ - "2.0" - ], - "description": "The Swagger version of this document." - }, - "info": { - "$ref": "#/definitions/info" - }, - "host": { - "type": "string", - "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", - "description": "The host (name or ip) of the API. Example: 'swagger.io'" - }, - "basePath": { - "type": "string", - "pattern": "^/", - "description": "The base path to the API. Example: '/api'." 
- }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "consumes": { - "description": "A list of MIME types accepted by the API.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "definitions": { - "$ref": "#/definitions/definitions" - }, - "parameters": { - "$ref": "#/definitions/parameterDefinitions" - }, - "responses": { - "$ref": "#/definitions/responseDefinitions" - }, - "security": { - "$ref": "#/definitions/security" - }, - "securityDefinitions": { - "$ref": "#/definitions/securityDefinitions" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "General information about the API.", - "required": [ - "version", - "title" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "title": { - "type": "string", - "description": "A unique and precise title of the API." - }, - "version": { - "type": "string", - "description": "A semantic version number of the API." - }, - "description": { - "type": "string", - "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." - }, - "termsOfService": { - "type": "string", - "description": "The terms of service for the API." - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the owners of the API.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the contact person/organization." - }, - "url": { - "type": "string", - "description": "The URL pointing to the contact information.", - "format": "uri" - }, - "email": { - "type": "string", - "description": "The email address of the contact person/organization.", - "format": "email" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "license": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the license type. It's encouraged to use an OSI compatible license." - }, - "url": { - "type": "string", - "description": "The URL pointing to the license.", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "paths": { - "type": "object", - "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - }, - "^/": { - "$ref": "#/definitions/pathItem" - } - }, - "additionalProperties": false - }, - "definitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
- }, - "parameterDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameter" - }, - "description": "One or more JSON representations for parameters" - }, - "responseDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/response" - }, - "description": "One or more JSON representations for parameters" - }, - "externalDocs": { - "type": "object", - "additionalProperties": false, - "description": "information about external documentation", - "required": [ - "url" - ], - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "examples": { - "type": "object", - "additionalProperties": true - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the HTTP message." - }, - "operation": { - "type": "object", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string", - "description": "A brief summary of the operation." - }, - "description": { - "type": "string", - "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string", - "description": "A unique identifier of the operation." - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "consumes": { - "description": "A list of MIME types the API can consume.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "parameters": { - "$ref": "#/definitions/parametersList" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "deprecated": { - "type": "boolean", - "default": false - }, - "security": { - "$ref": "#/definitions/security" - } - } - }, - "pathItem": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "parameters": { - "$ref": "#/definitions/parametersList" - } - } - }, - "responses": { - "type": "object", - "description": "Response objects names can either be any valid HTTP status code or 'default'.", - "minProperties": 1, - "additionalProperties": false, - "patternProperties": { - "^([0-9]{3})$|^(default)$": { - "$ref": "#/definitions/responseValue" - }, - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "not": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - } - }, - "responseValue": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "response": { - 
"type": "object", - "required": [ - "description" - ], - "properties": { - "description": { - "type": "string" - }, - "schema": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/fileSchema" - } - ] - }, - "headers": { - "$ref": "#/definitions/headers" - }, - "examples": { - "$ref": "#/definitions/examples" - } - }, - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "headers": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/header" - } - }, - "header": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "vendorExtension": { - "description": "Any property starting with x- is valid.", - "additionalProperties": true, - "additionalItems": true - }, - "bodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "schema" - ], - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "body" - ] - }, - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "schema": { - "$ref": "#/definitions/schema" - } - }, - "additionalProperties": false - }, - "headerParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "header" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "queryParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "query" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "formDataParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "formData" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array", - "file" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "pathParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "required" - ], - "properties": { - "required": { - "type": "boolean", - "enum": [ - true - ], - "description": "Determines whether or not this parameter is required or optional." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "path" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "nonBodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "type" - ], - "oneOf": [ - { - "$ref": "#/definitions/headerParameterSubSchema" - }, - { - "$ref": "#/definitions/formDataParameterSubSchema" - }, - { - "$ref": "#/definitions/queryParameterSubSchema" - }, - { - "$ref": "#/definitions/pathParameterSubSchema" - } - ] - }, - "parameter": { - "oneOf": [ - { - "$ref": "#/definitions/bodyParameter" - }, - { - "$ref": "#/definitions/nonBodyParameter" - } - ] - }, - "schema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "enum": { - "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "boolean" - } - ], - "default": {} - }, - "type": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/type" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - } - ], - "default": {} - }, - "allOf": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "default": {} - }, - "discriminator": { - "type": "string" - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "fileSchema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "type" - ], - "properties": { - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "type": { - "type": "string", - "enum": [ - "file" - ] - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "primitivesItems": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "securityRequirement": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "xml": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - 
"prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean", - "default": false - }, - "wrapped": { - "type": "boolean", - "default": false - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "tag": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "securityDefinitions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/basicAuthenticationSecurity" - }, - { - "$ref": "#/definitions/apiKeySecurity" - }, - { - "$ref": "#/definitions/oauth2ImplicitSecurity" - }, - { - "$ref": "#/definitions/oauth2PasswordSecurity" - }, - { - "$ref": "#/definitions/oauth2ApplicationSecurity" - }, - { - "$ref": "#/definitions/oauth2AccessCodeSecurity" - } - ] - } - }, - "basicAuthenticationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "basic" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "apiKeySecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "name", - "in" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "apiKey" - ] - }, - "name": { - "type": "string" - }, - "in": { - "type": "string", - "enum": [ - "header", - "query" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ImplicitSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "implicit" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2PasswordSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "password" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ApplicationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "application" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2AccessCodeSecurity": { - "type": "object", - 
"additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "accessCode" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2Scopes": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "mediaTypeList": { - "type": "array", - "items": { - "$ref": "#/definitions/mimeType" - }, - "uniqueItems": true - }, - "parametersList": { - "type": "array", - "description": "The parameters needed to send a valid API call.", - "additionalItems": false, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "uniqueItems": true - }, - "schemesList": { - "type": "array", - "description": "The transfer protocol of the API.", - "items": { - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss" - ] - }, - "uniqueItems": true - }, - "collectionFormat": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes" - ], - "default": "csv" - }, - "collectionFormatWithMulti": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes", - "multi" - ], - "default": "csv" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "jsonReference": { - "type": "object", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "description": { - "type": "string" - } - } - } - } -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/README.md b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/README.md deleted file mode 100644 index 848b16c..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/README.md +++ /dev/null @@ 
-1,3 +0,0 @@ -# Compiler support code - -This directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/context.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/context.go deleted file mode 100644 index a64c1b7..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Context contains state of the compiler as it traverses a document. -type Context struct { - Parent *Context - Name string - ExtensionHandlers *[]ExtensionHandler -} - -// NewContextWithExtensions returns a new object representing the compiler state -func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { - return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} -} - -// NewContext returns a new object representing the compiler state -func NewContext(name string, parent *Context) *Context { - if parent != nil { - return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} - } - return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} -} - -// Description returns a text description of the compiler state -func (context *Context) Description() string { - if context.Parent != nil { - return context.Parent.Description() + "." + context.Name - } - return context.Name -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/error.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/error.go deleted file mode 100644 index d8672c1..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/error.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Error represents compiler errors and their location in the document. -type Error struct { - Context *Context - Message string -} - -// NewError creates an Error. -func NewError(context *Context, message string) *Error { - return &Error{Context: context, Message: message} -} - -// Error returns the string value of an Error. 
-func (err *Error) Error() string { - if err.Context == nil { - return "ERROR " + err.Message - } - return "ERROR " + err.Context.Description() + " " + err.Message -} - -// ErrorGroup is a container for groups of Error values. -type ErrorGroup struct { - Errors []error -} - -// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. -func NewErrorGroupOrNil(errors []error) error { - if len(errors) == 0 { - return nil - } else if len(errors) == 1 { - return errors[0] - } else { - return &ErrorGroup{Errors: errors} - } -} - -func (group *ErrorGroup) Error() string { - result := "" - for i, err := range group.Errors { - if i > 0 { - result += "\n" - } - result += err.Error() - } - return result -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go deleted file mode 100644 index 1f85b65..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - - "strings" - - "errors" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - ext_plugin "github.com/googleapis/gnostic/extensions" - yaml "gopkg.in/yaml.v2" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// HandleExtension calls a binary extension handler. 
-func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { - handled := false - var errFromPlugin error - var outFromPlugin *any.Any - - if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { - for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { - outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) - if outFromPlugin == nil { - continue - } else { - handled = true - break - } - } - } - return handled, outFromPlugin, errFromPlugin -} - -func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - binary, _ := yaml.Marshal(in) - - request := &ext_plugin.ExtensionHandlerRequest{} - - version := &ext_plugin.Version{} - version.Major = 0 - version.Minor = 1 - version.Patch = 0 - request.CompilerVersion = version - - request.Wrapper = &ext_plugin.Wrapper{} - - request.Wrapper.Version = "v2" - request.Wrapper.Yaml = string(binary) - request.Wrapper.ExtensionName = extensionName - - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - - if err != nil { - fmt.Printf("Error: %+v\n", err) - return nil, err - } - response := &ext_plugin.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil { - fmt.Printf("Error: %+v\n", err) - fmt.Printf("%s\n", string(output)) - return nil, err - } - if !response.Handled { - return nil, nil - } - if len(response.Error) != 0 { - message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) - return nil, errors.New(message) - } - return response.Value, nil - } - return nil, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/helpers.go deleted file mode 100644 index 76df635..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "gopkg.in/yaml.v2" - "regexp" - "sort" - "strconv" -) - -// compiler helper functions, usually called from generated code - -// UnpackMap gets a yaml.MapSlice if possible. -func UnpackMap(in interface{}) (yaml.MapSlice, bool) { - m, ok := in.(yaml.MapSlice) - if ok { - return m, true - } - // do we have an empty array? - a, ok := in.([]interface{}) - if ok && len(a) == 0 { - // if so, return an empty map - return yaml.MapSlice{}, true - } - return nil, false -} - -// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
-func SortedKeysForMap(m yaml.MapSlice) []string { - keys := make([]string, 0) - for _, item := range m { - keys = append(keys, item.Key.(string)) - } - sort.Strings(keys) - return keys -} - -// MapHasKey returns true if a yaml.MapSlice contains a specified key. -func MapHasKey(m yaml.MapSlice, key string) bool { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return true - } - } - return false -} - -// MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m yaml.MapSlice, key string) interface{} { - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok && key == itemKey { - return item.Value - } - } - return nil -} - -// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. -func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { - stringArray := make([]string, 0) - for _, item := range interfaceArray { - v, ok := item.(string) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { - missingKeys := make([]string, 0) - for _, k := range requiredKeys { - if !MapHasKey(m, k) { - missingKeys = append(missingKeys, k) - } - } - return missingKeys -} - -// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { - invalidKeys := make([]string, 0) - for _, item := range m { - itemKey, ok := item.Key.(string) - if ok { - key := itemKey - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { - found = true - break - } - } - if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } - } - } - } - return invalidKeys -} - -// DescribeMap describes a map (for debugging purposes). -func DescribeMap(in interface{}, indent string) string { - description := "" - m, ok := in.(map[string]interface{}) - if ok { - keys := make([]string, 0) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := m[k] - description += fmt.Sprintf("%s%s:\n", indent, k) - description += DescribeMap(v, indent+" ") - } - return description - } - a, ok := in.([]interface{}) - if ok { - for i, v := range a { - description += fmt.Sprintf("%s%d:\n", indent, i) - description += DescribeMap(v, indent+" ") - } - return description - } - description += fmt.Sprintf("%s%+v\n", indent, in) - return description -} - -// PluralProperties returns the string "properties" pluralized. -func PluralProperties(count int) string { - if count == 1 { - return "property" - } - return "properties" -} - -// StringArrayContainsValue returns true if a string array contains a specified value. -func StringArrayContainsValue(array []string, value string) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
-func StringArrayContainsValues(array []string, values []string) bool { - for _, value := range values { - if !StringArrayContainsValue(array, value) { - return false - } - } - return true -} - -// StringValue returns the string value of an item. -func StringValue(item interface{}) (value string, ok bool) { - value, ok = item.(string) - if ok { - return value, ok - } - intValue, ok := item.(int) - if ok { - return strconv.Itoa(intValue), true - } - return "", false -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/main.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/main.go deleted file mode 100644 index 9713a21..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compiler provides support functions to generated compiler code. -package compiler diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/reader.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/reader.go deleted file mode 100644 index c954a2d..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "errors" - "fmt" - "gopkg.in/yaml.v2" - "io/ioutil" - "log" - "net/http" - "net/url" - "path/filepath" - "strings" -) - -var fileCache map[string][]byte -var infoCache map[string]interface{} -var count int64 - -var verboseReader = false - -func initializeFileCache() { - if fileCache == nil { - fileCache = make(map[string][]byte, 0) - } -} - -func initializeInfoCache() { - if infoCache == nil { - infoCache = make(map[string]interface{}, 0) - } -} - -// FetchFile gets a specified file from the local filesystem or a remote location. 
-func FetchFile(fileurl string) ([]byte, error) { - initializeFileCache() - bytes, ok := fileCache[fileurl] - if ok { - if verboseReader { - log.Printf("Cache hit %s", fileurl) - } - return bytes, nil - } - if verboseReader { - log.Printf("Fetching %s", fileurl) - } - response, err := http.Get(fileurl) - if err != nil { - return nil, err - } - if response.StatusCode != 200 { - return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) - } - defer response.Body.Close() - bytes, err = ioutil.ReadAll(response.Body) - if err == nil { - fileCache[fileurl] = bytes - } - return bytes, err -} - -// ReadBytesForFile reads the bytes of a file. -func ReadBytesForFile(filename string) ([]byte, error) { - // is the filename a url? - fileurl, _ := url.Parse(filename) - if fileurl.Scheme != "" { - // yes, fetch it - bytes, err := FetchFile(filename) - if err != nil { - return nil, err - } - return bytes, nil - } - // no, it's a local filename - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. -func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { - initializeInfoCache() - cachedInfo, ok := infoCache[filename] - if ok { - if verboseReader { - log.Printf("Cache hit info for file %s", filename) - } - return cachedInfo, nil - } - if verboseReader { - log.Printf("Reading info for file %s", filename) - } - var info yaml.MapSlice - err := yaml.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - if len(filename) > 0 { - infoCache[filename] = info - } - return info, nil -} - -// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. -func ReadInfoForRef(basefile string, ref string) (interface{}, error) { - initializeInfoCache() - { - info, ok := infoCache[ref] - if ok { - if verboseReader { - log.Printf("Cache hit for ref %s#%s", basefile, ref) - } - return info, nil - } - } - if verboseReader { - log.Printf("Reading info for ref %s#%s", basefile, ref) - } - count = count + 1 - basedir, _ := filepath.Split(basefile) - parts := strings.Split(ref, "#") - var filename string - if parts[0] != "" { - filename = basedir + parts[0] - } else { - filename = basefile - } - bytes, err := ReadBytesForFile(filename) - if err != nil { - return nil, err - } - info, err := ReadInfoFromBytes(filename, bytes) - if err != nil { - log.Printf("File error: %v\n", err) - } else { - if len(parts) > 1 { - path := strings.Split(parts[1], "/") - for i, key := range path { - if i > 0 { - m, ok := info.(yaml.MapSlice) - if ok { - found := false - for _, section := range m { - if section.Key == key { - info = section.Value - found = true - } - } - if !found { - infoCache[ref] = nil - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - } - } - } - } - } - infoCache[ref] = info - return info, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh deleted file mode 100644 index 68d02a0..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh +++ /dev/null @@ -1,5 +0,0 @@ -go get github.com/golang/protobuf/protoc-gen-go - -protoc \ ---go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. 
*.proto - diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/README.md b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/README.md deleted file mode 100644 index ff1c2eb..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Extensions - -This directory contains support code for building Gnostic extensions and associated examples. - -Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go deleted file mode 100644 index 749ff78..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ /dev/null @@ -1,218 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: extension.proto - -/* -Package openapiextension_v1 is a generated protocol buffer package. - -It is generated from these files: - extension.proto - -It has these top-level messages: - Version - ExtensionHandlerRequest - ExtensionHandlerResponse - Wrapper -*/ -package openapiextension_v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The version number of OpenAPI compiler. -type Version struct { - Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Version) GetMajor() int32 { - if m != nil { - return m.Major - } - return 0 -} - -func (m *Version) GetMinor() int32 { - if m != nil { - return m.Minor - } - return 0 -} - -func (m *Version) GetPatch() int32 { - if m != nil { - return m.Patch - } - return 0 -} - -func (m *Version) GetSuffix() string { - if m != nil { - return m.Suffix - } - return "" -} - -// An encoded Request is written to the ExtensionHandler's stdin. -type ExtensionHandlerRequest struct { - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to gnostic. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` - // The version number of openapi compiler. 
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` -} - -func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } -func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerRequest) ProtoMessage() {} -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if m != nil { - return m.Wrapper - } - return nil -} - -func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if m != nil { - return m.CompilerVersion - } - return nil -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -type ExtensionHandlerResponse struct { - // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` - // Error message. If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` - // text output - Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` -} - -func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } -func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerResponse) ProtoMessage() {} -func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *ExtensionHandlerResponse) GetHandled() bool { - if m != nil { - return m.Handled - } - return false -} - -func (m *ExtensionHandlerResponse) GetError() []string { - if m != nil { - return m.Error - } - return nil -} - -func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { - if m != nil { - return m.Value - } - return nil -} - -type Wrapper struct { - // version of the OpenAPI specification in which this extension was written. 
- Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` - // Name of the extension - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` - // Must be a valid yaml for the proto - Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` -} - -func (m *Wrapper) Reset() { *m = Wrapper{} } -func (m *Wrapper) String() string { return proto.CompactTextString(m) } -func (*Wrapper) ProtoMessage() {} -func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *Wrapper) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Wrapper) GetExtensionName() string { - if m != nil { - return m.ExtensionName - } - return "" -} - -func (m *Wrapper) GetYaml() string { - if m != nil { - return m.Yaml - } - return "" -} - -func init() { - proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") - proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") - proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") - proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") -} - -func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 357 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, - 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, - 0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46, - 0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7, - 0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22, - 0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11, - 0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18, - 0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8, - 0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c, - 0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69, - 0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef, - 0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3, - 0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87, - 0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44, - 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80, - 0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4, - 0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54, - 0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c, - 0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e, - 0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2, - 0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa, - 0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b, - 
0x80, 0x51, 0x02, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extension.proto deleted file mode 100644 index 04856f9..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -import "google/protobuf/any.proto"; -package openapiextension.v1; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIExtensionV1"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.gnostic.v1"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -// -option objc_class_prefix = "OAE"; // "OpenAPI Extension" - -// The version number of OpenAPI compiler. -message Version { - int32 major = 1; - int32 minor = 2; - int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - string suffix = 4; -} - -// An encoded Request is written to the ExtensionHandler's stdin. -message ExtensionHandlerRequest { - - // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to gnostic. - Wrapper wrapper = 1; - - // The version number of openapi compiler. - Version compiler_version = 3; -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -message ExtensionHandlerResponse { - - // true if the extension is handled by the extension handler; false otherwise - bool handled = 1; - - // Error message. If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. 
Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - repeated string error = 2; - - // text output - google.protobuf.Any value = 3; -} - -message Wrapper { - // version of the OpenAPI specification in which this extension was written. - string version = 1; - - // Name of the extension - string extension_name = 2; - - // Must be a valid yaml for the proto - string yaml = 3; -} diff --git a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extensions.go deleted file mode 100644 index 94a8e62..0000000 --- a/cmd/bpa-operator/vendor/github.com/googleapis/gnostic/extensions/extensions.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openapiextension_v1 - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" -) - -type documentHandler func(version string, extensionName string, document string) -type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) - -func forInputYamlFromOpenapic(handler documentHandler) { - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - fmt.Println("File error:", err.Error()) - os.Exit(1) - } - if len(data) == 0 { - fmt.Println("No input data.") - os.Exit(1) - } - request := &ExtensionHandlerRequest{} - err = proto.Unmarshal(data, request) - if err != nil { - fmt.Println("Input error:", err.Error()) - os.Exit(1) - } - handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) -} - -// ProcessExtension calles the handler for a specified extension. 
-func ProcessExtension(handleExtension extensionHandler) { - response := &ExtensionHandlerResponse{} - forInputYamlFromOpenapic( - func(version string, extensionName string, yamlInput string) { - var newObject proto.Message - var err error - - handled, newObject, err := handleExtension(extensionName, yamlInput) - if !handled { - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - - // If we reach here, then the extension is handled - response.Handled = true - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - response.Value, err = ptypes.MarshalAny(newObject) - if err != nil { - response.Error = append(response.Error, err.Error()) - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) - os.Exit(0) - } - }) - - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.gitignore b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.gitignore deleted file mode 100644 index dd91ed2..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -**/*.swp -.idea -.vscode diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.travis.yml b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.travis.yml deleted file mode 100644 index 9153a00..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go -sudo: false -install: -- GO111MODULE=off go get golang.org/x/crypto/ssh -- GO111MODULE=off go get -v -tags 'fixtures acceptance' ./... -- GO111MODULE=off go get github.com/wadey/gocovmerge -- GO111MODULE=off go get github.com/mattn/goveralls -- GO111MODULE=off go get golang.org/x/tools/cmd/goimports -go: -- "1.10" -- "1.11" -- "1.12" -- "tip" -env: - global: - - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" - - GO111MODULE=on -before_script: -- go vet ./... 
-script: -- ./script/coverage -- ./script/unittest -- ./script/format -after_success: -- $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.zuul.yaml deleted file mode 100644 index 776ed5a..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ /dev/null @@ -1,102 +0,0 @@ -- job: - name: gophercloud-unittest - parent: golang-test - description: | - Run gophercloud unit test - run: .zuul/playbooks/gophercloud-unittest/run.yaml - nodeset: ubuntu-xenial-ut - -- job: - name: gophercloud-acceptance-test - parent: golang-test - description: | - Run gophercloud acceptance test on master branch - run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml - -- job: - name: gophercloud-acceptance-test-ironic - parent: golang-test - description: | - Run gophercloud ironic acceptance test on master branch - run: .zuul/playbooks/gophercloud-acceptance-test-ironic/run.yaml - -- job: - name: gophercloud-acceptance-test-queens - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on queens branch - vars: - global_env: - OS_BRANCH: stable/queens - -- job: - name: gophercloud-acceptance-test-rocky - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on rocky branch - vars: - global_env: - OS_BRANCH: stable/rocky - -- job: - name: gophercloud-acceptance-test-pike - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on pike branch - vars: - global_env: - OS_BRANCH: stable/pike - -- job: - name: gophercloud-acceptance-test-ocata - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on ocata branch - vars: - global_env: - OS_BRANCH: stable/ocata - -- job: - name: gophercloud-acceptance-test-newton - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on newton branch - vars: - global_env: - OS_BRANCH: stable/newton - -- job: - name: gophercloud-acceptance-test-mitaka - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on mitaka branch - vars: - global_env: - OS_BRANCH: stable/mitaka - nodeset: ubuntu-trusty - -- project: - name: gophercloud/gophercloud - check: - jobs: - - gophercloud-unittest - - gophercloud-acceptance-test - - gophercloud-acceptance-test-ironic - recheck-mitaka: - jobs: - - gophercloud-acceptance-test-mitaka - recheck-newton: - jobs: - - gophercloud-acceptance-test-newton - recheck-ocata: - jobs: - - gophercloud-acceptance-test-ocata - recheck-pike: - jobs: - - gophercloud-acceptance-test-pike - recheck-queens: - jobs: - - gophercloud-acceptance-test-queens - recheck-rocky: - jobs: - - gophercloud-acceptance-test-rocky diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md deleted file mode 100644 index e69de29..0000000 diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/LICENSE b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/LICENSE deleted file mode 100644 index fbbbc9e..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Copyright 2012-2013 Rackspace, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. 
You may obtain a copy of the -License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed -under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. - ------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/README.md b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/README.md deleted file mode 100644 index ad29041..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/README.md +++ /dev/null @@ -1,159 +0,0 @@ -# Gophercloud: an OpenStack SDK for Go -[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) -[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) - -Gophercloud is an OpenStack Go SDK. - -## Useful links - -* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) -* [Effective Go](https://golang.org/doc/effective_go.html) - -## How to install - -Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) -is pointing to an appropriate directory where you want to install Gophercloud: - -```bash -mkdir $HOME/go -export GOPATH=$HOME/go -``` - -To protect yourself against changes in your dependencies, we highly recommend choosing a -[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for -your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install -Gophercloud as a dependency like so: - -```bash -go get github.com/gophercloud/gophercloud - -# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" - -godep save ./... -``` - -This will install all the source files you need into a `Godeps/_workspace` directory, which is -referenceable from your own source files when you use the `godep go` command. - -## Getting started - -### Credentials - -Because you'll be hitting an API, you will need to retrieve your OpenStack -credentials and either store them as environment variables or in your local Go -files. The first method is recommended because it decouples credential -information from source code, allowing you to push the latter to your version -control system without any security risk. - -You will need to retrieve the following: - -* username -* password -* a valid Keystone identity URL - -For users that have the OpenStack dashboard installed, there's a shortcut. If -you visit the `project/access_and_security` path in Horizon and click on the -"Download OpenStack RC File" button at the top right hand corner, you will -download a bash file that exports all of your access details to environment -variables. To execute the file, run `source admin-openrc.sh` and you will be -prompted for your password. - -### Authentication - -Once you have access to your credentials, you can begin plugging them into -Gophercloud. The next step is authentication, and this is handled by a base -"Provider" struct. 
To get one, you can either pass in your credentials -explicitly, or tell Gophercloud to use environment variables: - -```go -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/utils" -) - -// Option 1: Pass in the values yourself -opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", -} - -// Option 2: Use a utility function to retrieve all your environment variables -opts, err := openstack.AuthOptionsFromEnv() -``` - -Once you have the `opts` variable, you can pass it in and get back a -`ProviderClient` struct: - -```go -provider, err := openstack.AuthenticatedClient(opts) -``` - -The `ProviderClient` is the top-level client that all of your OpenStack services -derive from. The provider contains all of the authentication details that allow -your Go code to access the API - such as the base URL and token ID. - -### Provision a server - -Once we have a base Provider, we inject it as a dependency into each OpenStack -service. In order to work with the Compute API, we need a Compute service -client; which can be created like so: - -```go -client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), -}) -``` - -We then use this `client` for any Compute API operation we want. In our case, -we want to provision a new server - so we invoke the `Create` method and pass -in the flavor ID (hardware specification) and image ID (operating system) we're -interested in: - -```go -import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - -server, err := servers.Create(client, servers.CreateOpts{ - Name: "My new server!", - FlavorRef: "flavor_id", - ImageRef: "image_id", -}).Extract() -``` - -The above code sample creates a new server with the parameters, and embodies the -new resource in the `server` variable (a -[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct). - -## Advanced Usage - -Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works. - -## Backwards-Compatibility Guarantees - -None. Vendor it and write tests covering the parts you use. - -## Contributing - -See the [contributing guide](./.github/CONTRIBUTING.md). - -## Help and feedback - -If you're struggling with something or have spotted a potential bug, feel free -to submit an issue to our [bug tracker](https://github.com/gophercloud/gophercloud/issues). - -## Thank You - -We'd like to extend special thanks and appreciation to the following: - -### OpenLab - - - -OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. - -### VEXXHOST - - - -VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/auth_options.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/auth_options.go deleted file mode 100644 index 5ffa8d1..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/auth_options.go +++ /dev/null @@ -1,437 +0,0 @@ -package gophercloud - -/* -AuthOptions stores information needed to authenticate to an OpenStack Cloud. -You can populate one manually, or use a provider's AuthOptionsFromEnv() function -to read relevant information from the standard environment variables. 
Pass one -to a provider's AuthenticatedClient function to authenticate and obtain a -ProviderClient representing an active session on that provider. - -Its fields are the union of those recognized by each identity implementation and -provider. - -An example of manually providing authentication information: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -An example of using AuthOptionsFromEnv(), where the environment variables can -be read from a file, such as a standard openrc file: - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) -*/ -type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. While it's ultimately needed by - // all of the identity services, it will often be populated by a provider-level - // function. - // - // The IdentityEndpoint is typically referred to as the "auth_url" or - // "OS_AUTH_URL" in the information provided by the cloud operator. - IdentityEndpoint string `json:"-"` - - // Username is required if using Identity V2 API. Consult with your provider's - // control panel to discover your account's username. In Identity V3, either - // UserID or a combination of Username and DomainID or DomainName are needed. - Username string `json:"username,omitempty"` - UserID string `json:"-"` - - Password string `json:"password,omitempty"` - - // At most one of DomainID and DomainName must be provided if using Username - // with Identity V3. Otherwise, either are optional. - DomainID string `json:"-"` - DomainName string `json:"name,omitempty"` - - // The TenantID and TenantName fields are optional for the Identity V2 API. - // The same fields are known as project_id and project_name in the Identity - // V3 API, but are collected as TenantID and TenantName here in both cases. - // Some providers allow you to specify a TenantName instead of the TenantId. - // Some require both. Your provider's authentication policies will determine - // how these fields influence authentication. - // If DomainID or DomainName are provided, they will also apply to TenantName. - // It is not currently possible to authenticate with Username and a Domain - // and scope to a Project in a different Domain by using TenantName. To - // accomplish that, the ProjectID will need to be provided as the TenantID - // option. - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - - // AllowReauth should be set to true if you grant permission for Gophercloud to - // cache your credentials in memory, and to allow Gophercloud to attempt to - // re-authenticate automatically if/when your token expires. If you set it to - // false, it will not cache these settings, but re-authentication will not be - // possible. This setting defaults to false. - // - // NOTE: The reauth function will try to re-authenticate endlessly if left - // unchecked. The way to limit the number of attempts is to provide a custom - // HTTP client to the provider client and provide a transport that implements - // the RoundTripper interface and stores the number of failed retries. 
For an - // example of this, see here: - // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 - AllowReauth bool `json:"-"` - - // TokenID allows users to authenticate (possibly as another user) with an - // authentication token ID. - TokenID string `json:"-"` - - // Scope determines the scoping of the authentication request. - Scope *AuthScope `json:"-"` - - // Authentication through Application Credentials requires supplying name, project and secret - // For project we can use TenantID - ApplicationCredentialID string `json:"-"` - ApplicationCredentialName string `json:"-"` - ApplicationCredentialSecret string `json:"-"` -} - -// AuthScope allows a created token to be limited to a specific domain or project. -type AuthScope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string -} - -// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder -// interface in the v2 tokens package -func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { - // Populate the request map. - authMap := make(map[string]interface{}) - - if opts.Username != "" { - if opts.Password != "" { - authMap["passwordCredentials"] = map[string]interface{}{ - "username": opts.Username, - "password": opts.Password, - } - } else { - return nil, ErrMissingInput{Argument: "Password"} - } - } else if opts.TokenID != "" { - authMap["token"] = map[string]interface{}{ - "id": opts.TokenID, - } - } else { - return nil, ErrMissingInput{Argument: "Username"} - } - - if opts.TenantID != "" { - authMap["tenantId"] = opts.TenantID - } - if opts.TenantName != "" { - authMap["tenantName"] = opts.TenantName - } - - return map[string]interface{}{"auth": authMap}, nil -} - -func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { - type domainReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - } - - type projectReq struct { - Domain *domainReq `json:"domain,omitempty"` - Name *string `json:"name,omitempty"` - ID *string `json:"id,omitempty"` - } - - type userReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Password string `json:"password,omitempty"` - Domain *domainReq `json:"domain,omitempty"` - } - - type passwordReq struct { - User userReq `json:"user"` - } - - type tokenReq struct { - ID string `json:"id"` - } - - type applicationCredentialReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - User *userReq `json:"user,omitempty"` - Secret *string `json:"secret,omitempty"` - } - - type identityReq struct { - Methods []string `json:"methods"` - Password *passwordReq `json:"password,omitempty"` - Token *tokenReq `json:"token,omitempty"` - ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"` - } - - type authReq struct { - Identity identityReq `json:"identity"` - } - - type request struct { - Auth authReq `json:"auth"` - } - - // Populate the request structure based on the provided arguments. Create and return an error - // if insufficient or incompatible information is present. - var req request - - if opts.Password == "" { - if opts.TokenID != "" { - // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication - // parameters. 
- if opts.Username != "" { - return nil, ErrUsernameWithToken{} - } - if opts.UserID != "" { - return nil, ErrUserIDWithToken{} - } - if opts.DomainID != "" { - return nil, ErrDomainIDWithToken{} - } - if opts.DomainName != "" { - return nil, ErrDomainNameWithToken{} - } - - // Configure the request for Token authentication. - req.Auth.Identity.Methods = []string{"token"} - req.Auth.Identity.Token = &tokenReq{ - ID: opts.TokenID, - } - - } else if opts.ApplicationCredentialID != "" { - // Configure the request for ApplicationCredentialID authentication. - // https://github.com/openstack/keystoneauth/blob/stable/rocky/keystoneauth1/identity/v3/application_credential.py#L48-L67 - // There are three kinds of possible application_credential requests - // 1. application_credential id + secret - // 2. application_credential name + secret + user_id - // 3. application_credential name + secret + username + domain_id / domain_name - if opts.ApplicationCredentialSecret == "" { - return nil, ErrAppCredMissingSecret{} - } - req.Auth.Identity.Methods = []string{"application_credential"} - req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ - ID: &opts.ApplicationCredentialID, - Secret: &opts.ApplicationCredentialSecret, - } - } else if opts.ApplicationCredentialName != "" { - if opts.ApplicationCredentialSecret == "" { - return nil, ErrAppCredMissingSecret{} - } - - var userRequest *userReq - - if opts.UserID != "" { - // UserID could be used without the domain information - userRequest = &userReq{ - ID: &opts.UserID, - } - } - - if userRequest == nil && opts.Username == "" { - // Make sure that Username or UserID are provided - return nil, ErrUsernameOrUserID{} - } - - if userRequest == nil && opts.DomainID != "" { - userRequest = &userReq{ - Name: &opts.Username, - Domain: &domainReq{ID: &opts.DomainID}, - } - } - - if userRequest == nil && opts.DomainName != "" { - userRequest = &userReq{ - Name: &opts.Username, - Domain: &domainReq{Name: &opts.DomainName}, - } - } - - // Make sure that DomainID or DomainName are provided among Username - if userRequest == nil { - return nil, ErrDomainIDOrDomainName{} - } - - req.Auth.Identity.Methods = []string{"application_credential"} - req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ - Name: &opts.ApplicationCredentialName, - User: userRequest, - Secret: &opts.ApplicationCredentialSecret, - } - } else { - // If no password or token ID or ApplicationCredential are available, authentication can't continue. - return nil, ErrMissingPassword{} - } - } else { - // Password authentication. - req.Auth.Identity.Methods = []string{"password"} - - // At least one of Username and UserID must be specified. - if opts.Username == "" && opts.UserID == "" { - return nil, ErrUsernameOrUserID{} - } - - if opts.Username != "" { - // If Username is provided, UserID may not be provided. - if opts.UserID != "" { - return nil, ErrUsernameOrUserID{} - } - - // Either DomainID or DomainName must also be specified. - if opts.DomainID == "" && opts.DomainName == "" { - return nil, ErrDomainIDOrDomainName{} - } - - if opts.DomainID != "" { - if opts.DomainName != "" { - return nil, ErrDomainIDOrDomainName{} - } - - // Configure the request for Username and Password authentication with a DomainID. 
- req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - Name: &opts.Username, - Password: opts.Password, - Domain: &domainReq{ID: &opts.DomainID}, - }, - } - } - - if opts.DomainName != "" { - // Configure the request for Username and Password authentication with a DomainName. - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - Name: &opts.Username, - Password: opts.Password, - Domain: &domainReq{Name: &opts.DomainName}, - }, - } - } - } - - if opts.UserID != "" { - // If UserID is specified, neither DomainID nor DomainName may be. - if opts.DomainID != "" { - return nil, ErrDomainIDWithUserID{} - } - if opts.DomainName != "" { - return nil, ErrDomainNameWithUserID{} - } - - // Configure the request for UserID and Password authentication. - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ID: &opts.UserID, Password: opts.Password}, - } - } - } - - b, err := BuildRequestBody(req, "") - if err != nil { - return nil, err - } - - if len(scope) != 0 { - b["auth"].(map[string]interface{})["scope"] = scope - } - - return b, nil -} - -func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - // For backwards compatibility. - // If AuthOptions.Scope was not set, try to determine it. - // This works well for common scenarios. - if opts.Scope == nil { - opts.Scope = new(AuthScope) - if opts.TenantID != "" { - opts.Scope.ProjectID = opts.TenantID - } else { - if opts.TenantName != "" { - opts.Scope.ProjectName = opts.TenantName - opts.Scope.DomainID = opts.DomainID - opts.Scope.DomainName = opts.DomainName - } - } - } - - if opts.Scope.ProjectName != "" { - // ProjectName provided: either DomainID or DomainName must also be supplied. - // ProjectID may not be supplied. - if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { - return nil, ErrScopeDomainIDOrDomainName{} - } - if opts.Scope.ProjectID != "" { - return nil, ErrScopeProjectIDOrProjectName{} - } - - if opts.Scope.DomainID != "" { - // ProjectName + DomainID - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, - }, - }, nil - } - - if opts.Scope.DomainName != "" { - // ProjectName + DomainName - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, - }, - }, nil - } - } else if opts.Scope.ProjectID != "" { - // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if opts.Scope.DomainID != "" { - return nil, ErrScopeProjectIDAlone{} - } - if opts.Scope.DomainName != "" { - return nil, ErrScopeProjectIDAlone{} - } - - // ProjectID - return map[string]interface{}{ - "project": map[string]interface{}{ - "id": &opts.Scope.ProjectID, - }, - }, nil - } else if opts.Scope.DomainID != "" { - // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
- if opts.Scope.DomainName != "" { - return nil, ErrScopeDomainIDOrDomainName{} - } - - // DomainID - return map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &opts.Scope.DomainID, - }, - }, nil - } else if opts.Scope.DomainName != "" { - // DomainName - return map[string]interface{}{ - "domain": map[string]interface{}{ - "name": &opts.Scope.DomainName, - }, - }, nil - } - - return nil, nil -} - -func (opts AuthOptions) CanReauth() bool { - return opts.AllowReauth -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/auth_result.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/auth_result.go deleted file mode 100644 index 2e4699b..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/auth_result.go +++ /dev/null @@ -1,52 +0,0 @@ -package gophercloud - -/* -AuthResult is the result from the request that was used to obtain a provider -client's Keystone token. It is returned from ProviderClient.GetAuthResult(). - -The following types satisfy this interface: - - github.com/gophercloud/gophercloud/openstack/identity/v2/tokens.CreateResult - github.com/gophercloud/gophercloud/openstack/identity/v3/tokens.CreateResult - -Usage example: - - import ( - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" - ) - - func GetAuthenticatedUserID(providerClient *gophercloud.ProviderClient) (string, error) { - r := providerClient.GetAuthResult() - if r == nil { - //ProviderClient did not use openstack.Authenticate(), e.g. because token - //was set manually with ProviderClient.SetToken() - return "", errors.New("no AuthResult available") - } - switch r := r.(type) { - case tokens2.CreateResult: - u, err := r.ExtractUser() - if err != nil { - return "", err - } - return u.ID, nil - case tokens3.CreateResult: - u, err := r.ExtractUser() - if err != nil { - return "", err - } - return u.ID, nil - default: - panic(fmt.Sprintf("got unexpected AuthResult type %t", r)) - } - } - -Both implementing types share a lot of methods by name, like ExtractUser() in -this example. But those methods cannot be part of the AuthResult interface -because the return types are different (in this case, type tokens2.User vs. -type tokens3.User). -*/ -type AuthResult interface { - ExtractTokenID() (string, error) -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/doc.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/doc.go deleted file mode 100644 index 953ca82..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/doc.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Package gophercloud provides a multi-vendor interface to OpenStack-compatible -clouds. The library has a three-level hierarchy: providers, services, and -resources. - -Authenticating with Providers - -Provider structs represent the cloud providers that offer and manage a -collection of services. You will generally want to create one Provider -client per OpenStack cloud. - - It is now recommended to use the `clientconfig` package found at - https://github.com/gophercloud/utils/tree/master/openstack/clientconfig - for all authentication purposes. - - The below documentation is still relevant. clientconfig simply implements - the below and presents it in an easier and more flexible way. - -Use your OpenStack credentials to create a Provider client. 
The -IdentityEndpoint is typically referred to as "auth_url" or "OS_AUTH_URL" in -information provided by the cloud operator. Additionally, the cloud may refer to -TenantID or TenantName as project_id and project_name. Credentials are -specified like so: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -You can authenticate with a token by doing: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - TokenID: "{token_id}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -You may also use the openstack.AuthOptionsFromEnv() helper function. This -function reads in standard environment variables frequently found in an -OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant" -instead of "project". - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) - -Service Clients - -Service structs are specific to a provider and handle all of the logic and -operations for a particular OpenStack service. Examples of services include: -Compute, Object Storage, Block Storage. In order to define one, you need to -pass in the parent provider, like so: - - opts := gophercloud.EndpointOpts{Region: "RegionOne"} - - client, err := openstack.NewComputeV2(provider, opts) - -Resources - -Resource structs are the domain models that services make use of in order -to work with and represent the state of API resources: - - server, err := servers.Get(client, "{serverId}").Extract() - -Intermediate Result structs are returned for API operations, which allow -generic access to the HTTP headers, response body, and any errors associated -with the network transaction. To turn a result into a usable resource struct, -you must call the Extract method which is chained to the response, or an -Extract function from an applicable extension: - - result := servers.Get(client, "{serverId}") - - // Attempt to extract the disk configuration from the OS-DCF disk config - // extension: - config, err := diskconfig.ExtractGet(result) - -All requests that enumerate a collection return a Pager struct that is used to -iterate through the results one page at a time. Use the EachPage method on that -Pager to handle each successive Page in a closure, then use the appropriate -extraction method from that request's package to interpret that Page as a slice -of results: - - err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { - s, err := servers.ExtractServers(page) - if err != nil { - return false, err - } - - // Handle the []servers.Server slice. - - // Return "false" or an error to prematurely stop fetching new pages. - return true, nil - }) - -If you want to obtain the entire collection of pages without doing any -intermediary processing on each page, you can use the AllPages method: - - allPages, err := servers.List(client, nil).AllPages() - allServers, err := servers.ExtractServers(allPages) - -This top-level package contains utility functions and data types that are used -throughout the provider and service packages. Of particular note for end users -are the AuthOptions and EndpointOpts structs. 
-*/ -package gophercloud diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/endpoint_search.go deleted file mode 100644 index 2fbc3c9..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/endpoint_search.go +++ /dev/null @@ -1,76 +0,0 @@ -package gophercloud - -// Availability indicates to whom a specific service endpoint is accessible: -// the internet at large, internal networks only, or only to administrators. -// Different identity services use different terminology for these. Identity v2 -// lists them as different kinds of URLs within the service catalog ("adminURL", -// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an -// endpoint's response. -type Availability string - -const ( - // AvailabilityAdmin indicates that an endpoint is only available to - // administrators. - AvailabilityAdmin Availability = "admin" - - // AvailabilityPublic indicates that an endpoint is available to everyone on - // the internet. - AvailabilityPublic Availability = "public" - - // AvailabilityInternal indicates that an endpoint is only available within - // the cluster's internal network. - AvailabilityInternal Availability = "internal" -) - -// EndpointOpts specifies search criteria used by queries against an -// OpenStack service catalog. The options must contain enough information to -// unambiguously identify one, and only one, endpoint within the catalog. -// -// Usually, these are passed to service client factory functions in a provider -// package, like "openstack.NewComputeV2()". -type EndpointOpts struct { - // Type [required] is the service type for the client (e.g., "compute", - // "object-store"). Generally, this will be supplied by the service client - // function, but a user-given value will be honored if provided. - Type string - - // Name [optional] is the service name for the client (e.g., "nova") as it - // appears in the service catalog. Services can have the same Type but a - // different Name, which is why both Type and Name are sometimes needed. - Name string - - // Region [required] is the geographic region in which the endpoint resides, - // generally specifying which datacenter should house your resources. - // Required only for services that span multiple regions. - Region string - - // Availability [optional] is the visibility of the endpoint to be returned. - // Valid types include the constants AvailabilityPublic, AvailabilityInternal, - // or AvailabilityAdmin from this package. - // - // Availability is not required, and defaults to AvailabilityPublic. Not all - // providers or services offer all Availability options. - Availability Availability -} - -/* -EndpointLocator is an internal function to be used by provider implementations. - -It provides an implementation that locates a single endpoint from a service -catalog for a specific ProviderClient based on user-provided EndpointOpts. The -provider then uses it to discover related ServiceClients. -*/ -type EndpointLocator func(EndpointOpts) (string, error) - -// ApplyDefaults is an internal method to be used by provider implementations. -// -// It sets EndpointOpts fields if not already set, including a default type. -// Currently, EndpointOpts.Availability defaults to the public endpoint. 
-func (eo *EndpointOpts) ApplyDefaults(t string) { - if eo.Type == "" { - eo.Type = t - } - if eo.Availability == "" { - eo.Availability = AvailabilityPublic - } -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/errors.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/errors.go deleted file mode 100644 index 0bcb3af..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/errors.go +++ /dev/null @@ -1,471 +0,0 @@ -package gophercloud - -import ( - "fmt" - "strings" -) - -// BaseError is an error type that all other error types embed. -type BaseError struct { - DefaultErrString string - Info string -} - -func (e BaseError) Error() string { - e.DefaultErrString = "An error occurred while executing a Gophercloud request." - return e.choseErrString() -} - -func (e BaseError) choseErrString() string { - if e.Info != "" { - return e.Info - } - return e.DefaultErrString -} - -// ErrMissingInput is the error when input is required in a particular -// situation but not provided by the user -type ErrMissingInput struct { - BaseError - Argument string -} - -func (e ErrMissingInput) Error() string { - e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument) - return e.choseErrString() -} - -// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors. -type ErrInvalidInput struct { - ErrMissingInput - Value interface{} -} - -func (e ErrInvalidInput) Error() string { - e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value) - return e.choseErrString() -} - -// ErrMissingEnvironmentVariable is the error when environment variable is required -// in a particular situation but not provided by the user -type ErrMissingEnvironmentVariable struct { - BaseError - EnvironmentVariable string -} - -func (e ErrMissingEnvironmentVariable) Error() string { - e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable) - return e.choseErrString() -} - -// ErrMissingAnyoneOfEnvironmentVariables is the error when anyone of the environment variables -// is required in a particular situation but not provided by the user -type ErrMissingAnyoneOfEnvironmentVariables struct { - BaseError - EnvironmentVariables []string -} - -func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Missing one of the following environment variables [%s]", - strings.Join(e.EnvironmentVariables, ", "), - ) - return e.choseErrString() -} - -// ErrUnexpectedResponseCode is returned by the Request method when a response code other than -// those listed in OkCodes is encountered. -type ErrUnexpectedResponseCode struct { - BaseError - URL string - Method string - Expected []int - Actual int - Body []byte -} - -func (e ErrUnexpectedResponseCode) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", - e.Expected, e.Method, e.URL, e.Actual, e.Body, - ) - return e.choseErrString() -} - -// ErrDefault400 is the default error type returned on a 400 HTTP response code. -type ErrDefault400 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault401 is the default error type returned on a 401 HTTP response code. -type ErrDefault401 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault403 is the default error type returned on a 403 HTTP response code. 
-type ErrDefault403 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault404 is the default error type returned on a 404 HTTP response code. -type ErrDefault404 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault405 is the default error type returned on a 405 HTTP response code. -type ErrDefault405 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault408 is the default error type returned on a 408 HTTP response code. -type ErrDefault408 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault409 is the default error type returned on a 409 HTTP response code. -type ErrDefault409 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault429 is the default error type returned on a 429 HTTP response code. -type ErrDefault429 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault500 is the default error type returned on a 500 HTTP response code. -type ErrDefault500 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault503 is the default error type returned on a 503 HTTP response code. -type ErrDefault503 struct { - ErrUnexpectedResponseCode -} - -func (e ErrDefault400) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Bad request with: [%s %s], error message: %s", - e.Method, e.URL, e.Body, - ) - return e.choseErrString() -} -func (e ErrDefault401) Error() string { - return "Authentication failed" -} -func (e ErrDefault403) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Request forbidden: [%s %s], error message: %s", - e.Method, e.URL, e.Body, - ) - return e.choseErrString() -} -func (e ErrDefault404) Error() string { - return "Resource not found" -} -func (e ErrDefault405) Error() string { - return "Method not allowed" -} -func (e ErrDefault408) Error() string { - return "The server timed out waiting for the request" -} -func (e ErrDefault429) Error() string { - return "Too many requests have been sent in a given amount of time. Pause" + - " requests, wait up to one minute, and try again." -} -func (e ErrDefault500) Error() string { - return "Internal Server Error" -} -func (e ErrDefault503) Error() string { - return "The service is currently unable to handle the request due to a temporary" + - " overloading or maintenance. This is a temporary condition. Try again later." -} - -// Err400er is the interface resource error types implement to override the error message -// from a 400 error. -type Err400er interface { - Error400(ErrUnexpectedResponseCode) error -} - -// Err401er is the interface resource error types implement to override the error message -// from a 401 error. -type Err401er interface { - Error401(ErrUnexpectedResponseCode) error -} - -// Err403er is the interface resource error types implement to override the error message -// from a 403 error. -type Err403er interface { - Error403(ErrUnexpectedResponseCode) error -} - -// Err404er is the interface resource error types implement to override the error message -// from a 404 error. -type Err404er interface { - Error404(ErrUnexpectedResponseCode) error -} - -// Err405er is the interface resource error types implement to override the error message -// from a 405 error. -type Err405er interface { - Error405(ErrUnexpectedResponseCode) error -} - -// Err408er is the interface resource error types implement to override the error message -// from a 408 error. -type Err408er interface { - Error408(ErrUnexpectedResponseCode) error -} - -// Err409er is the interface resource error types implement to override the error message -// from a 409 error. 
-type Err409er interface { - Error409(ErrUnexpectedResponseCode) error -} - -// Err429er is the interface resource error types implement to override the error message -// from a 429 error. -type Err429er interface { - Error429(ErrUnexpectedResponseCode) error -} - -// Err500er is the interface resource error types implement to override the error message -// from a 500 error. -type Err500er interface { - Error500(ErrUnexpectedResponseCode) error -} - -// Err503er is the interface resource error types implement to override the error message -// from a 503 error. -type Err503er interface { - Error503(ErrUnexpectedResponseCode) error -} - -// ErrTimeOut is the error type returned when an operation times out. -type ErrTimeOut struct { - BaseError -} - -func (e ErrTimeOut) Error() string { - e.DefaultErrString = "A time out occurred" - return e.choseErrString() -} - -// ErrUnableToReauthenticate is the error type returned when reauthentication fails. -type ErrUnableToReauthenticate struct { - BaseError - ErrOriginal error -} - -func (e ErrUnableToReauthenticate) Error() string { - e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal) - return e.choseErrString() -} - -// ErrErrorAfterReauthentication is the error type returned when reauthentication -// succeeds, but an error occurs afterward (usually an HTTP error). -type ErrErrorAfterReauthentication struct { - BaseError - ErrOriginal error -} - -func (e ErrErrorAfterReauthentication) Error() string { - e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal) - return e.choseErrString() -} - -// ErrServiceNotFound is returned when no service in a service catalog matches -// the provided EndpointOpts. This is generally returned by provider service -// factory methods like "NewComputeV2()" and can mean that a service is not -// enabled for your account. -type ErrServiceNotFound struct { - BaseError -} - -func (e ErrServiceNotFound) Error() string { - e.DefaultErrString = "No suitable service could be found in the service catalog." - return e.choseErrString() -} - -// ErrEndpointNotFound is returned when no available endpoints match the -// provided EndpointOpts. This is also generally returned by provider service -// factory methods, and usually indicates that a region was specified -// incorrectly. -type ErrEndpointNotFound struct { - BaseError -} - -func (e ErrEndpointNotFound) Error() string { - e.DefaultErrString = "No suitable endpoint could be found in the service catalog." - return e.choseErrString() -} - -// ErrResourceNotFound is the error when trying to retrieve a resource's -// ID by name and the resource doesn't exist. -type ErrResourceNotFound struct { - BaseError - Name string - ResourceType string -} - -func (e ErrResourceNotFound) Error() string { - e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name) - return e.choseErrString() -} - -// ErrMultipleResourcesFound is the error when trying to retrieve a resource's -// ID by name and multiple resources have the user-provided name. 
-type ErrMultipleResourcesFound struct { - BaseError - Name string - Count int - ResourceType string -} - -func (e ErrMultipleResourcesFound) Error() string { - e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name) - return e.choseErrString() -} - -// ErrUnexpectedType is the error when an unexpected type is encountered -type ErrUnexpectedType struct { - BaseError - Expected string - Actual string -} - -func (e ErrUnexpectedType) Error() string { - e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual) - return e.choseErrString() -} - -func unacceptedAttributeErr(attribute string) string { - return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute) -} - -func redundantWithTokenErr(attribute string) string { - return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute) -} - -func redundantWithUserID(attribute string) string { - return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute) -} - -// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used. -type ErrAPIKeyProvided struct{ BaseError } - -func (e ErrAPIKeyProvided) Error() string { - return unacceptedAttributeErr("APIKey") -} - -// ErrTenantIDProvided indicates that a TenantID was provided but can't be used. -type ErrTenantIDProvided struct{ BaseError } - -func (e ErrTenantIDProvided) Error() string { - return unacceptedAttributeErr("TenantID") -} - -// ErrTenantNameProvided indicates that a TenantName was provided but can't be used. -type ErrTenantNameProvided struct{ BaseError } - -func (e ErrTenantNameProvided) Error() string { - return unacceptedAttributeErr("TenantName") -} - -// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead. -type ErrUsernameWithToken struct{ BaseError } - -func (e ErrUsernameWithToken) Error() string { - return redundantWithTokenErr("Username") -} - -// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead. -type ErrUserIDWithToken struct{ BaseError } - -func (e ErrUserIDWithToken) Error() string { - return redundantWithTokenErr("UserID") -} - -// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead. -type ErrDomainIDWithToken struct{ BaseError } - -func (e ErrDomainIDWithToken) Error() string { - return redundantWithTokenErr("DomainID") -} - -// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.s -type ErrDomainNameWithToken struct{ BaseError } - -func (e ErrDomainNameWithToken) Error() string { - return redundantWithTokenErr("DomainName") -} - -// ErrUsernameOrUserID indicates that neither username nor userID are specified, or both are at once. -type ErrUsernameOrUserID struct{ BaseError } - -func (e ErrUsernameOrUserID) Error() string { - return "Exactly one of Username and UserID must be provided for password authentication" -} - -// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. -type ErrDomainIDWithUserID struct{ BaseError } - -func (e ErrDomainIDWithUserID) Error() string { - return redundantWithUserID("DomainID") -} - -// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. 
-type ErrDomainNameWithUserID struct{ BaseError } - -func (e ErrDomainNameWithUserID) Error() string { - return redundantWithUserID("DomainName") -} - -// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it. -// It may also indicate that both a DomainID and a DomainName were provided at once. -type ErrDomainIDOrDomainName struct{ BaseError } - -func (e ErrDomainIDOrDomainName) Error() string { - return "You must provide exactly one of DomainID or DomainName to authenticate by Username" -} - -// ErrMissingPassword indicates that no password was provided and no token is available. -type ErrMissingPassword struct{ BaseError } - -func (e ErrMissingPassword) Error() string { - return "You must provide a password to authenticate" -} - -// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. -type ErrScopeDomainIDOrDomainName struct{ BaseError } - -func (e ErrScopeDomainIDOrDomainName) Error() string { - return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName" -} - -// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope. -type ErrScopeProjectIDOrProjectName struct{ BaseError } - -func (e ErrScopeProjectIDOrProjectName) Error() string { - return "You must provide at most one of ProjectID or ProjectName in a Scope" -} - -// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. -type ErrScopeProjectIDAlone struct{ BaseError } - -func (e ErrScopeProjectIDAlone) Error() string { - return "ProjectID must be supplied alone in a Scope" -} - -// ErrScopeEmpty indicates that no credentials were provided in a Scope. -type ErrScopeEmpty struct{ BaseError } - -func (e ErrScopeEmpty) Error() string { - return "You must provide either a Project or Domain in a Scope" -} - -// ErrAppCredMissingSecret indicates that no Application Credential Secret was provided with Application Credential ID or Name -type ErrAppCredMissingSecret struct{ BaseError } - -func (e ErrAppCredMissingSecret) Error() string { - return "You must provide an Application Credential Secret" -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/go.mod b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/go.mod deleted file mode 100644 index d1ee3b4..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/go.mod +++ /dev/null @@ -1,7 +0,0 @@ -module github.com/gophercloud/gophercloud - -require ( - golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 - golang.org/x/sys v0.0.0-20190209173611-3b5209105503 // indirect - gopkg.in/yaml.v2 v2.2.2 -) diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/go.sum b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/go.sum deleted file mode 100644 index 33cb0be..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/go.sum +++ /dev/null @@ -1,8 +0,0 @@ -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go deleted file mode 100644 index 0e8d90f..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go +++ /dev/null @@ -1,125 +0,0 @@ -package openstack - -import ( - "os" - - "github.com/gophercloud/gophercloud" -) - -var nilOptions = gophercloud.AuthOptions{} - -/* -AuthOptionsFromEnv fills out an identity.AuthOptions structure with the -settings found on the various OpenStack OS_* environment variables. - -The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, -OS_PASSWORD and OS_PROJECT_ID. - -Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, -or an error will result. OS_PROJECT_ID, is optional. - -OS_TENANT_ID and OS_TENANT_NAME are deprecated forms of OS_PROJECT_ID and -OS_PROJECT_NAME and the latter are expected against a v3 auth api. - -If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred -as "tenant" in Gophercloud. - -If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to -handle projects not on the default domain. - -To use this function, first set the OS_* environment variables (for example, -by sourcing an `openrc` file), then: - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) -*/ -func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { - authURL := os.Getenv("OS_AUTH_URL") - username := os.Getenv("OS_USERNAME") - userID := os.Getenv("OS_USERID") - password := os.Getenv("OS_PASSWORD") - tenantID := os.Getenv("OS_TENANT_ID") - tenantName := os.Getenv("OS_TENANT_NAME") - domainID := os.Getenv("OS_DOMAIN_ID") - domainName := os.Getenv("OS_DOMAIN_NAME") - applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID") - applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME") - applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET") - - // If OS_PROJECT_ID is set, overwrite tenantID with the value. - if v := os.Getenv("OS_PROJECT_ID"); v != "" { - tenantID = v - } - - // If OS_PROJECT_NAME is set, overwrite tenantName with the value. 
- if v := os.Getenv("OS_PROJECT_NAME"); v != "" { - tenantName = v - } - - if authURL == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_AUTH_URL", - } - return nilOptions, err - } - - if userID == "" && username == "" { - // Empty username and userID could be ignored, when applicationCredentialID and applicationCredentialSecret are set - if applicationCredentialID == "" && applicationCredentialSecret == "" { - err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, - } - return nilOptions, err - } - } - - if password == "" && applicationCredentialID == "" && applicationCredentialName == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_PASSWORD", - } - return nilOptions, err - } - - if (applicationCredentialID != "" || applicationCredentialName != "") && applicationCredentialSecret == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_APPLICATION_CREDENTIAL_SECRET", - } - return nilOptions, err - } - - if domainID == "" && domainName == "" && tenantID == "" && tenantName != "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_PROJECT_ID", - } - return nilOptions, err - } - - if applicationCredentialID == "" && applicationCredentialName != "" && applicationCredentialSecret != "" { - if userID == "" && username == "" { - return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, - } - } - if username != "" && domainID == "" && domainName == "" { - return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_DOMAIN_ID", "OS_DOMAIN_NAME"}, - } - } - } - - ao := gophercloud.AuthOptions{ - IdentityEndpoint: authURL, - UserID: userID, - Username: username, - Password: password, - TenantID: tenantID, - TenantName: tenantName, - DomainID: domainID, - DomainName: domainName, - ApplicationCredentialID: applicationCredentialID, - ApplicationCredentialName: applicationCredentialName, - ApplicationCredentialSecret: applicationCredentialSecret, - } - - return ao, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/client.go deleted file mode 100644 index 50f2397..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ /dev/null @@ -1,438 +0,0 @@ -package openstack - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" - "github.com/gophercloud/gophercloud/openstack/utils" -) - -const ( - // v2 represents Keystone v2. - // It should never increase beyond 2.0. - v2 = "v2.0" - - // v3 represents Keystone v3. - // The version can be anything from v3 to v3.x. - v3 = "v3" -) - -/* -NewClient prepares an unauthenticated ProviderClient instance. -Most users will probably prefer using the AuthenticatedClient function -instead. - -This is useful if you wish to explicitly control the version of the identity -service that's used for authentication explicitly, for example. 
- -A basic example of using this would be: - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.NewClient(ao.IdentityEndpoint) - client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) -*/ -func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { - base, err := utils.BaseEndpoint(endpoint) - if err != nil { - return nil, err - } - - endpoint = gophercloud.NormalizeURL(endpoint) - base = gophercloud.NormalizeURL(base) - - p := new(gophercloud.ProviderClient) - p.IdentityBase = base - p.IdentityEndpoint = endpoint - p.UseTokenLock() - - return p, nil -} - -/* -AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint -specified by the options, acquires a token, and returns a Provider Client -instance that's ready to operate. - -If the full path to a versioned identity endpoint was specified (example: -http://example.com:5000/v3), that path will be used as the endpoint to query. - -If a versionless endpoint was specified (example: http://example.com:5000/), -the endpoint will be queried to determine which versions of the identity service -are available, then chooses the most recent or most supported version. - -Example: - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(ao) - client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), - }) -*/ -func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { - client, err := NewClient(options.IdentityEndpoint) - if err != nil { - return nil, err - } - - err = Authenticate(client, options) - if err != nil { - return nil, err - } - return client, nil -} - -// Authenticate or re-authenticate against the most recent identity service -// supported at the provided endpoint. -func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { - versions := []*utils.Version{ - {ID: v2, Priority: 20, Suffix: "/v2.0/"}, - {ID: v3, Priority: 30, Suffix: "/v3/"}, - } - - chosen, endpoint, err := utils.ChooseVersion(client, versions) - if err != nil { - return err - } - - switch chosen.ID { - case v2: - return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) - case v3: - return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) - default: - // The switch statement must be out of date from the versions list. - return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) - } -} - -// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. 
-func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { - return v2auth(client, "", options, eo) -} - -func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { - v2Client, err := NewIdentityV2(client, eo) - if err != nil { - return err - } - - if endpoint != "" { - v2Client.Endpoint = endpoint - } - - v2Opts := tokens2.AuthOptions{ - IdentityEndpoint: options.IdentityEndpoint, - Username: options.Username, - Password: options.Password, - TenantID: options.TenantID, - TenantName: options.TenantName, - AllowReauth: options.AllowReauth, - TokenID: options.TokenID, - } - - result := tokens2.Create(v2Client, v2Opts) - - err = client.SetTokenAndAuthResult(result) - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - if options.AllowReauth { - // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but - // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, - // this should retry authentication only once - tac := *client - tac.SetThrowaway(true) - tac.ReauthFunc = nil - tac.SetTokenAndAuthResult(nil) - tao := options - tao.AllowReauth = false - client.ReauthFunc = func() error { - err := v2auth(&tac, endpoint, tao, eo) - if err != nil { - return err - } - client.CopyTokenFrom(&tac) - return nil - } - } - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V2EndpointURL(catalog, opts) - } - - return nil -} - -// AuthenticateV3 explicitly authenticates against the identity v3 service. -func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { - return v3auth(client, "", options, eo) -} - -func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { - // Override the generated service endpoint with the one returned by the version endpoint. - v3Client, err := NewIdentityV3(client, eo) - if err != nil { - return err - } - - if endpoint != "" { - v3Client.Endpoint = endpoint - } - - result := tokens3.Create(v3Client, opts) - - err = client.SetTokenAndAuthResult(result) - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - if opts.CanReauth() { - // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but - // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, - // this should retry authentication only once - tac := *client - tac.SetThrowaway(true) - tac.ReauthFunc = nil - tac.SetTokenAndAuthResult(nil) - var tao tokens3.AuthOptionsBuilder - switch ot := opts.(type) { - case *gophercloud.AuthOptions: - o := *ot - o.AllowReauth = false - tao = &o - case *tokens3.AuthOptions: - o := *ot - o.AllowReauth = false - tao = &o - default: - tao = opts - } - client.ReauthFunc = func() error { - err := v3auth(&tac, endpoint, tao, eo) - if err != nil { - return err - } - client.CopyTokenFrom(&tac) - return nil - } - } - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V3EndpointURL(catalog, opts) - } - - return nil -} - -// NewIdentityV2 creates a ServiceClient that may be used to interact with the -// v2 identity service. 
-func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - endpoint := client.IdentityBase + "v2.0/" - clientType := "identity" - var err error - if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { - eo.ApplyDefaults(clientType) - endpoint, err = client.EndpointLocator(eo) - if err != nil { - return nil, err - } - } - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: endpoint, - Type: clientType, - }, nil -} - -// NewIdentityV3 creates a ServiceClient that may be used to access the v3 -// identity service. -func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - endpoint := client.IdentityBase + "v3/" - clientType := "identity" - var err error - if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { - eo.ApplyDefaults(clientType) - endpoint, err = client.EndpointLocator(eo) - if err != nil { - return nil, err - } - } - - // Ensure endpoint still has a suffix of v3. - // This is because EndpointLocator might have found a versionless - // endpoint or the published endpoint is still /v2.0. In both - // cases, we need to fix the endpoint to point to /v3. - base, err := utils.BaseEndpoint(endpoint) - if err != nil { - return nil, err - } - - base = gophercloud.NormalizeURL(base) - - endpoint = base + "v3/" - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: endpoint, - Type: clientType, - }, nil -} - -func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { - sc := new(gophercloud.ServiceClient) - eo.ApplyDefaults(clientType) - url, err := client.EndpointLocator(eo) - if err != nil { - return sc, err - } - sc.ProviderClient = client - sc.Endpoint = url - sc.Type = clientType - return sc, nil -} - -// NewBareMetalV1 creates a ServiceClient that may be used with the v1 -// bare metal package. -func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "baremetal") -} - -// NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 -// bare metal introspection package. -func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "baremetal-inspector") -} - -// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 -// object storage package. -func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "object-store") -} - -// NewComputeV2 creates a ServiceClient that may be used with the v2 compute -// package. -func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "compute") -} - -// NewNetworkV2 creates a ServiceClient that may be used with the v2 network -// package. -func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "network") - sc.ResourceBase = sc.Endpoint + "v2.0/" - return sc, err -} - -// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 -// block storage service. 
-func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volume") -} - -// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 -// block storage service. -func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev2") -} - -// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. -func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev3") -} - -// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. -func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "sharev2") -} - -// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 -// CDN service. -func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "cdn") -} - -// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 -// orchestration service. -func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "orchestration") -} - -// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. -func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "database") -} - -// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS -// service. -func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "dns") - sc.ResourceBase = sc.Endpoint + "v2/" - return sc, err -} - -// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 -// image service. -func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "image") - sc.ResourceBase = sc.Endpoint + "v2/" - return sc, err -} - -// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 -// load balancer service. -func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "load-balancer") - sc.ResourceBase = sc.Endpoint + "v2.0/" - return sc, err -} - -// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering -// package. -func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "clustering") -} - -// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging -// service. 
-func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "messaging") - sc.MoreHeaders = map[string]string{"Client-ID": clientID} - return sc, err -} - -// NewContainerV1 creates a ServiceClient that may be used with v1 container package -func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container") -} - -// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key -// manager service. -func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "key-manager") - sc.ResourceBase = sc.Endpoint + "v1/" - return sc, err -} - -// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management -// package. -func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container-infra") -} - -// NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. -func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "workflowv2") -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/doc.go deleted file mode 100644 index cedf1f4..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -Package openstack contains resources for the individual OpenStack projects -supported in Gophercloud. It also includes functions to authenticate to an -OpenStack cloud and for provisioning various service-level clients. - -Example of Creating a Service Client - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(ao) - client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), - }) -*/ -package openstack diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go deleted file mode 100644 index 12c8aeb..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go +++ /dev/null @@ -1,107 +0,0 @@ -package openstack - -import ( - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" -) - -/* -V2EndpointURL discovers the endpoint URL for a specific service from a -ServiceCatalog acquired during the v2 identity service. - -The specified EndpointOpts are used to identify a unique, unambiguous endpoint -to return. It's an error both when multiple endpoints match the provided -criteria and when none do. The minimum that can be specified is a Type, but you -will also often need to specify a Name and/or a Region depending on what's -available on your OpenStack deployment. 
-*/ -func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. - var endpoints = make([]tokens2.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Region == "" || endpoint.Region == opts.Region { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // Report an error if the options were ambiguous. - if len(endpoints) > 1 { - err := &ErrMultipleMatchingEndpointsV2{} - err.Endpoints = endpoints - return "", err - } - - // Extract the appropriate URL from the matching Endpoint. - for _, endpoint := range endpoints { - switch opts.Availability { - case gophercloud.AvailabilityPublic: - return gophercloud.NormalizeURL(endpoint.PublicURL), nil - case gophercloud.AvailabilityInternal: - return gophercloud.NormalizeURL(endpoint.InternalURL), nil - case gophercloud.AvailabilityAdmin: - return gophercloud.NormalizeURL(endpoint.AdminURL), nil - default: - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err - } - } - - // Report an error if there were no matching endpoints. - err := &gophercloud.ErrEndpointNotFound{} - return "", err -} - -/* -V3EndpointURL discovers the endpoint URL for a specific service from a Catalog -acquired during the v3 identity service. - -The specified EndpointOpts are used to identify a unique, unambiguous endpoint -to return. It's an error both when multiple endpoints match the provided -criteria and when none do. The minimum that can be specified is a Type, but you -will also often need to specify a Name and/or a Region depending on what's -available on your OpenStack deployment. -*/ -func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Interface, - // Name if provided, and Region if provided. - var endpoints = make([]tokens3.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Availability != gophercloud.AvailabilityAdmin && - opts.Availability != gophercloud.AvailabilityPublic && - opts.Availability != gophercloud.AvailabilityInternal { - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err - } - if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && - (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // Report an error if the options were ambiguous. - if len(endpoints) > 1 { - return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints} - } - - // Extract the URL from the matching Endpoint. - for _, endpoint := range endpoints { - return gophercloud.NormalizeURL(endpoint.URL), nil - } - - // Report an error if there were no matching endpoints. 
- err := &gophercloud.ErrEndpointNotFound{} - return "", err -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/errors.go deleted file mode 100644 index df410b1..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package openstack - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" -) - -// ErrEndpointNotFound is the error when no suitable endpoint can be found -// in the user's catalog -type ErrEndpointNotFound struct{ gophercloud.BaseError } - -func (e ErrEndpointNotFound) Error() string { - return "No suitable endpoint could be found in the service catalog." -} - -// ErrInvalidAvailabilityProvided is the error when an invalid endpoint -// availability is provided -type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } - -func (e ErrInvalidAvailabilityProvided) Error() string { - return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) -} - -// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint -// for the given options is found in the v2 catalog -type ErrMultipleMatchingEndpointsV2 struct { - gophercloud.BaseError - Endpoints []tokens2.Endpoint -} - -func (e ErrMultipleMatchingEndpointsV2) Error() string { - return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) -} - -// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint -// for the given options is found in the v3 catalog -type ErrMultipleMatchingEndpointsV3 struct { - gophercloud.BaseError - Endpoints []tokens3.Endpoint -} - -func (e ErrMultipleMatchingEndpointsV3) Error() string { - return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) -} - -// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not -// found -type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoAuthURL) Error() string { - return "Environment variable OS_AUTH_URL needs to be set." -} - -// ErrNoUsername is the error when the OS_USERNAME environment variable is not -// found -type ErrNoUsername struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoUsername) Error() string { - return "Environment variable OS_USERNAME needs to be set." -} - -// ErrNoPassword is the error when the OS_PASSWORD environment variable is not -// found -type ErrNoPassword struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoPassword) Error() string { - return "Environment variable OS_PASSWORD needs to be set." -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go deleted file mode 100644 index 4562336..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Package tenants provides information and interaction with the -tenants API resource for the OpenStack Identity service. - -See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 -and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants -for more information. 
- -Example to List Tenants - - listOpts := tenants.ListOpts{ - Limit: 2, - } - - allPages, err := tenants.List(identityClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allTenants, err := tenants.ExtractTenants(allPages) - if err != nil { - panic(err) - } - - for _, tenant := range allTenants { - fmt.Printf("%+v\n", tenant) - } - -Example to Create a Tenant - - createOpts := tenants.CreateOpts{ - Name: "tenant_name", - Description: "this is a tenant", - Enabled: gophercloud.Enabled, - } - - tenant, err := tenants.Create(identityClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Tenant - - tenantID := "e6db6ed6277c461a853458589063b295" - - updateOpts := tenants.UpdateOpts{ - Description: "this is a new description", - Enabled: gophercloud.Disabled, - } - - tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Tenant - - tenantID := "e6db6ed6277c461a853458589063b295" - - err := tenants.Delete(identitYClient, tenantID).ExtractErr() - if err != nil { - panic(err) - } -*/ -package tenants diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go deleted file mode 100644 index f21a58f..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go +++ /dev/null @@ -1,116 +0,0 @@ -package tenants - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOpts filters the Tenants that are returned by the List call. -type ListOpts struct { - // Marker is the ID of the last Tenant on the previous page. - Marker string `q:"marker"` - - // Limit specifies the page size. - Limit int `q:"limit"` -} - -// List enumerates the Tenants to which the current token has access. -func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { - url := listURL(client) - if opts != nil { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return pagination.Pager{Err: err} - } - url += q.String() - } - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return TenantPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// CreateOpts represents the options needed when creating new tenant. -type CreateOpts struct { - // Name is the name of the tenant. - Name string `json:"name" required:"true"` - - // Description is the description of the tenant. - Description string `json:"description,omitempty"` - - // Enabled sets the tenant status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` -} - -// CreateOptsBuilder enables extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToTenantCreateMap() (map[string]interface{}, error) -} - -// ToTenantCreateMap assembles a request body based on the contents of -// a CreateOpts. -func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "tenant") -} - -// Create is the operation responsible for creating new tenant. 
-func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { - b, err := opts.ToTenantCreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - return -} - -// Get requests details on a single tenant by ID. -func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { - _, r.Err = client.Get(getURL(client, id), &r.Body, nil) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToTenantUpdateMap() (map[string]interface{}, error) -} - -// UpdateOpts specifies the base attributes that may be updated on an existing -// tenant. -type UpdateOpts struct { - // Name is the name of the tenant. - Name string `json:"name,omitempty"` - - // Description is the description of the tenant. - Description *string `json:"description,omitempty"` - - // Enabled sets the tenant status to enabled or disabled. - Enabled *bool `json:"enabled,omitempty"` -} - -// ToTenantUpdateMap formats an UpdateOpts structure into a request body. -func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "tenant") -} - -// Update is the operation responsible for updating exist tenants by their TenantID. -func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { - b, err := opts.ToTenantUpdateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - return -} - -// Delete is the operation responsible for permanently deleting a tenant. -func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { - _, r.Err = client.Delete(deleteURL(client, id), nil) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go deleted file mode 100644 index bb6c2c6..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go +++ /dev/null @@ -1,91 +0,0 @@ -package tenants - -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Tenant is a grouping of users in the identity service. -type Tenant struct { - // ID is a unique identifier for this tenant. - ID string `json:"id"` - - // Name is a friendlier user-facing name for this tenant. - Name string `json:"name"` - - // Description is a human-readable explanation of this Tenant's purpose. - Description string `json:"description"` - - // Enabled indicates whether or not a tenant is active. - Enabled bool `json:"enabled"` -} - -// TenantPage is a single page of Tenant results. -type TenantPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not a page of Tenants contains any results. -func (r TenantPage) IsEmpty() (bool, error) { - tenants, err := ExtractTenants(r) - return len(tenants) == 0, err -} - -// NextPageURL extracts the "next" link from the tenants_links section of the result. 
-func (r TenantPage) NextPageURL() (string, error) { - var s struct { - Links []gophercloud.Link `json:"tenants_links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return gophercloud.ExtractNextURL(s.Links) -} - -// ExtractTenants returns a slice of Tenants contained in a single page of -// results. -func ExtractTenants(r pagination.Page) ([]Tenant, error) { - var s struct { - Tenants []Tenant `json:"tenants"` - } - err := (r.(TenantPage)).ExtractInto(&s) - return s.Tenants, err -} - -type tenantResult struct { - gophercloud.Result -} - -// Extract interprets any tenantResults as a Tenant. -func (r tenantResult) Extract() (*Tenant, error) { - var s struct { - Tenant *Tenant `json:"tenant"` - } - err := r.ExtractInto(&s) - return s.Tenant, err -} - -// GetResult is the response from a Get request. Call its Extract method to -// interpret it as a Tenant. -type GetResult struct { - tenantResult -} - -// CreateResult is the response from a Create request. Call its Extract method -// to interpret it as a Tenant. -type CreateResult struct { - tenantResult -} - -// DeleteResult is the response from a Get request. Call its ExtractErr method -// to determine if the call succeeded or failed. -type DeleteResult struct { - gophercloud.ErrResult -} - -// UpdateResult is the response from a Update request. Call its Extract method -// to interpret it as a Tenant. -type UpdateResult struct { - tenantResult -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go deleted file mode 100644 index 0f02669..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go +++ /dev/null @@ -1,23 +0,0 @@ -package tenants - -import "github.com/gophercloud/gophercloud" - -func listURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tenants") -} - -func getURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} - -func createURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tenants") -} - -func deleteURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} - -func updateURL(client *gophercloud.ServiceClient, tenantID string) string { - return client.ServiceURL("tenants", tenantID) -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go deleted file mode 100644 index 5375eea..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Package tokens provides information and interaction with the token API -resource for the OpenStack Identity service. 
- -For more information, see: -http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 - -Example to Create an Unscoped Token from a Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "pass" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Tenant ID and Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "password", - TenantID: "fc394f2ab2df4114bde39905f800dc57" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Tenant Name and Password - - authOpts := gophercloud.AuthOptions{ - Username: "user", - Password: "password", - TenantName: "tenantname" - } - - token, err := tokens.Create(identityClient, authOpts).ExtractToken() - if err != nil { - panic(err) - } -*/ -package tokens diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go deleted file mode 100644 index ab32368..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go +++ /dev/null @@ -1,103 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// PasswordCredentialsV2 represents the required options to authenticate -// with a username and password. -type PasswordCredentialsV2 struct { - Username string `json:"username" required:"true"` - Password string `json:"password" required:"true"` -} - -// TokenCredentialsV2 represents the required options to authenticate -// with a token. -type TokenCredentialsV2 struct { - ID string `json:"id,omitempty" required:"true"` -} - -// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the -// AuthOptionsBuilder interface. -type AuthOptionsV2 struct { - PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"` - - // The TenantID and TenantName fields are optional for the Identity V2 API. - // Some providers allow you to specify a TenantName instead of the TenantId. - // Some require both. Your provider's authentication policies will determine - // how these fields influence authentication. - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - - // TokenCredentials allows users to authenticate (possibly as another user) - // with an authentication token ID. - TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"` -} - -// AuthOptionsBuilder allows extensions to add additional parameters to the -// token create request. -type AuthOptionsBuilder interface { - // ToTokenCreateMap assembles the Create request body, returning an error - // if parameters are missing or inconsistent. - ToTokenV2CreateMap() (map[string]interface{}, error) -} - -// AuthOptions are the valid options for Openstack Identity v2 authentication. -// For field descriptions, see gophercloud.AuthOptions. -type AuthOptions struct { - IdentityEndpoint string `json:"-"` - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - TenantID string `json:"tenantId,omitempty"` - TenantName string `json:"tenantName,omitempty"` - AllowReauth bool `json:"-"` - TokenID string -} - -// ToTokenV2CreateMap builds a token request body from the given AuthOptions. 
-func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { - v2Opts := AuthOptionsV2{ - TenantID: opts.TenantID, - TenantName: opts.TenantName, - } - - if opts.Password != "" { - v2Opts.PasswordCredentials = &PasswordCredentialsV2{ - Username: opts.Username, - Password: opts.Password, - } - } else { - v2Opts.TokenCredentials = &TokenCredentialsV2{ - ID: opts.TokenID, - } - } - - b, err := gophercloud.BuildRequestBody(v2Opts, "auth") - if err != nil { - return nil, err - } - return b, nil -} - -// Create authenticates to the identity service and attempts to acquire a Token. -// Generally, rather than interact with this call directly, end users should -// call openstack.AuthenticatedClient(), which abstracts all of the gory details -// about navigating service catalogs and such. -func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { - b, err := auth.ToTokenV2CreateMap() - if err != nil { - r.Err = err - return - } - _, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - }) - return -} - -// Get validates and retrieves information for user's token. -func Get(client *gophercloud.ServiceClient, token string) (r GetResult) { - _, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - }) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go deleted file mode 100644 index ee5da37..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go +++ /dev/null @@ -1,174 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" -) - -// Token provides only the most basic information related to an authentication -// token. -type Token struct { - // ID provides the primary means of identifying a user to the OpenStack API. - // OpenStack defines this field as an opaque value, so do not depend on its - // content. It is safe, however, to compare for equality. - ID string - - // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the - // authentication token becomes invalid. After this point in time, future - // API requests made using this authentication token will respond with - // errors. Either the caller will need to reauthenticate manually, or more - // preferably, the caller should exploit automatic re-authentication. - // See the AuthOptions structure for more details. - ExpiresAt time.Time - - // Tenant provides information about the tenant to which this token grants - // access. - Tenant tenants.Tenant -} - -// Role is a role for a user. -type Role struct { - Name string `json:"name"` -} - -// User is an OpenStack user. -type User struct { - ID string `json:"id"` - Name string `json:"name"` - UserName string `json:"username"` - Roles []Role `json:"roles"` -} - -// Endpoint represents a single API endpoint offered by a service. -// It provides the public and internal URLs, if supported, along with a region -// specifier, again if provided. -// -// The significance of the Region field will depend upon your provider. 
-// -// In addition, the interface offered by the service will have version -// information associated with it through the VersionId, VersionInfo, and -// VersionList fields, if provided or supported. -// -// In all cases, fields which aren't supported by the provider and service -// combined will assume a zero-value (""). -type Endpoint struct { - TenantID string `json:"tenantId"` - PublicURL string `json:"publicURL"` - InternalURL string `json:"internalURL"` - AdminURL string `json:"adminURL"` - Region string `json:"region"` - VersionID string `json:"versionId"` - VersionInfo string `json:"versionInfo"` - VersionList string `json:"versionList"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V2 service -// catalog listing. -// -// Each class of service, such as cloud DNS or block storage services, will have -// a single CatalogEntry representing it. -// -// Note: when looking for the desired service, try, whenever possible, to key -// off the type field. Otherwise, you'll tie the representation of the service -// to a specific provider. -type CatalogEntry struct { - // Name will contain the provider-specified name for the service. - Name string `json:"name"` - - // Type will contain a type string if OpenStack defines a type for the - // service. Otherwise, for provider-specific services, the provider may assign - // their own type strings. - Type string `json:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that - // may exist for the service. - Endpoints []Endpoint `json:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, -// successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry -} - -// CreateResult is the response from a Create request. Use ExtractToken() to -// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a -// service catalog. -type CreateResult struct { - gophercloud.Result -} - -// GetResult is the deferred response from a Get call, which is the same with a -// Created token. Use ExtractUser() to interpret it as a User. -type GetResult struct { - CreateResult -} - -// ExtractToken returns the just-created Token from a CreateResult. -func (r CreateResult) ExtractToken() (*Token, error) { - var s struct { - Access struct { - Token struct { - Expires string `json:"expires"` - ID string `json:"id"` - Tenant tenants.Tenant `json:"tenant"` - } `json:"token"` - } `json:"access"` - } - - err := r.ExtractInto(&s) - if err != nil { - return nil, err - } - - expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires) - if err != nil { - return nil, err - } - - return &Token{ - ID: s.Access.Token.ID, - ExpiresAt: expiresTs, - Tenant: s.Access.Token.Tenant, - }, nil -} - -// ExtractTokenID implements the gophercloud.AuthResult interface. The returned -// string is the same as the ID field of the Token struct returned from -// ExtractToken(). -func (r CreateResult) ExtractTokenID() (string, error) { - var s struct { - Access struct { - Token struct { - ID string `json:"id"` - } `json:"token"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return s.Access.Token.ID, err -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along -// with the user's Token. 
-func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - var s struct { - Access struct { - Entries []CatalogEntry `json:"serviceCatalog"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return &ServiceCatalog{Entries: s.Access.Entries}, err -} - -// ExtractUser returns the User from a GetResult. -func (r GetResult) ExtractUser() (*User, error) { - var s struct { - Access struct { - User User `json:"user"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return &s.Access.User, err -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go deleted file mode 100644 index ee0a28f..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// CreateURL generates the URL used to create new Tokens. -func CreateURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tokens") -} - -// GetURL generates the URL used to Validate Tokens. -func GetURL(client *gophercloud.ServiceClient, token string) string { - return client.ServiceURL("tokens", token) -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go deleted file mode 100644 index 966e128..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Package tokens provides information and interaction with the token API -resource for the OpenStack Identity service. 
- -For more information, see: -http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 - -Example to Create a Token From a Username and Password - - authOptions := tokens.AuthOptions{ - UserID: "username", - Password: "password", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token From a Username, Password, and Domain - - authOptions := tokens.AuthOptions{ - UserID: "username", - Password: "password", - DomainID: "default", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - - authOptions = tokens.AuthOptions{ - UserID: "username", - Password: "password", - DomainName: "default", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token From a Token - - authOptions := tokens.AuthOptions{ - TokenID: "token_id", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Project ID Scope - - scope := tokens.Scope{ - ProjectID: "0fe36e73809d46aeae6705c39077b1b3", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Domain ID Scope - - scope := tokens.Scope{ - DomainID: "default", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Project Name Scope - - scope := tokens.Scope{ - ProjectName: "project_name", - DomainID: "default", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -*/ -package tokens diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go deleted file mode 100644 index 2d20fa6..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go +++ /dev/null @@ -1,162 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// Scope allows a created token to be limited to a specific domain or project. -type Scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string -} - -// AuthOptionsBuilder provides the ability for extensions to add additional -// parameters to AuthOptions. Extensions must satisfy all required methods. -type AuthOptionsBuilder interface { - // ToTokenV3CreateMap assembles the Create request body, returning an error - // if parameters are missing or inconsistent. - ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) - ToTokenV3ScopeMap() (map[string]interface{}, error) - CanReauth() bool -} - -// AuthOptions represents options for authenticating a user. 
-type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. While it's ultimately needed - // by all of the identity services, it will often be populated by a - // provider-level function. - IdentityEndpoint string `json:"-"` - - // Username is required if using Identity V2 API. Consult with your provider's - // control panel to discover your account's username. In Identity V3, either - // UserID or a combination of Username and DomainID or DomainName are needed. - Username string `json:"username,omitempty"` - UserID string `json:"id,omitempty"` - - Password string `json:"password,omitempty"` - - // At most one of DomainID and DomainName must be provided if using Username - // with Identity V3. Otherwise, either are optional. - DomainID string `json:"-"` - DomainName string `json:"name,omitempty"` - - // AllowReauth should be set to true if you grant permission for Gophercloud - // to cache your credentials in memory, and to allow Gophercloud to attempt - // to re-authenticate automatically if/when your token expires. If you set - // it to false, it will not cache these settings, but re-authentication will - // not be possible. This setting defaults to false. - AllowReauth bool `json:"-"` - - // TokenID allows users to authenticate (possibly as another user) with an - // authentication token ID. - TokenID string `json:"-"` - - // Authentication through Application Credentials requires supplying name, project and secret - // For project we can use TenantID - ApplicationCredentialID string `json:"-"` - ApplicationCredentialName string `json:"-"` - ApplicationCredentialSecret string `json:"-"` - - Scope Scope `json:"-"` -} - -// ToTokenV3CreateMap builds a request body from AuthOptions. -func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { - gophercloudAuthOpts := gophercloud.AuthOptions{ - Username: opts.Username, - UserID: opts.UserID, - Password: opts.Password, - DomainID: opts.DomainID, - DomainName: opts.DomainName, - AllowReauth: opts.AllowReauth, - TokenID: opts.TokenID, - ApplicationCredentialID: opts.ApplicationCredentialID, - ApplicationCredentialName: opts.ApplicationCredentialName, - ApplicationCredentialSecret: opts.ApplicationCredentialSecret, - } - - return gophercloudAuthOpts.ToTokenV3CreateMap(scope) -} - -// ToTokenV3CreateMap builds a scope request body from AuthOptions. -func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - scope := gophercloud.AuthScope(opts.Scope) - - gophercloudAuthOpts := gophercloud.AuthOptions{ - Scope: &scope, - DomainID: opts.DomainID, - DomainName: opts.DomainName, - } - - return gophercloudAuthOpts.ToTokenV3ScopeMap() -} - -func (opts *AuthOptions) CanReauth() bool { - return opts.AllowReauth -} - -func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string { - return map[string]string{ - "X-Subject-Token": subjectToken, - } -} - -// Create authenticates and either generates a new token, or changes the Scope -// of an existing token. 
-func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) { - scope, err := opts.ToTokenV3ScopeMap() - if err != nil { - r.Err = err - return - } - - b, err := opts.ToTokenV3CreateMap(scope) - if err != nil { - r.Err = err - return - } - - resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - }) - r.Err = err - if resp != nil { - r.Header = resp.Header - } - return -} - -// Get validates and retrieves information about another token. -func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { - resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{200, 203}, - }) - if resp != nil { - r.Err = err - r.Header = resp.Header - } - return -} - -// Validate determines if a specified token is valid or not. -func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { - resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{200, 204, 404}, - }) - if err != nil { - return false, err - } - - return resp.StatusCode == 200 || resp.StatusCode == 204, nil -} - -// Revoke immediately makes specified token invalid. -func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) { - _, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - }) - return -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go deleted file mode 100644 index 6f26c96..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go +++ /dev/null @@ -1,178 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/gophercloud/gophercloud" -) - -// Endpoint represents a single API endpoint offered by a service. -// It matches either a public, internal or admin URL. -// If supported, it contains a region specifier, again if provided. -// The significance of the Region field will depend upon your provider. -type Endpoint struct { - ID string `json:"id"` - Region string `json:"region"` - RegionID string `json:"region_id"` - Interface string `json:"interface"` - URL string `json:"url"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V3 service -// catalog listing. Each class of service, such as cloud DNS or block storage -// services, could have multiple CatalogEntry representing it (one by interface -// type, e.g public, admin or internal). -// -// Note: when looking for the desired service, try, whenever possible, to key -// off the type field. Otherwise, you'll tie the representation of the service -// to a specific provider. -type CatalogEntry struct { - // Service ID - ID string `json:"id"` - - // Name will contain the provider-specified name for the service. - Name string `json:"name"` - - // Type will contain a type string if OpenStack defines a type for the - // service. Otherwise, for provider-specific services, the provider may - // assign their own type strings. - Type string `json:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that - // may exist for the service. - Endpoints []Endpoint `json:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, -// successful authentication. 
-type ServiceCatalog struct { - Entries []CatalogEntry `json:"catalog"` -} - -// Domain provides information about the domain to which this token grants -// access. -type Domain struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// User represents a user resource that exists in the Identity Service. -type User struct { - Domain Domain `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -// Role provides information about roles to which User is authorized. -type Role struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// Project provides information about project to which User is authorized. -type Project struct { - Domain Domain `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -// commonResult is the response from a request. A commonResult has various -// methods which can be used to extract different details about the result. -type commonResult struct { - gophercloud.Result -} - -// Extract is a shortcut for ExtractToken. -// This function is deprecated and still present for backward compatibility. -func (r commonResult) Extract() (*Token, error) { - return r.ExtractToken() -} - -// ExtractToken interprets a commonResult as a Token. -func (r commonResult) ExtractToken() (*Token, error) { - var s Token - err := r.ExtractInto(&s) - if err != nil { - return nil, err - } - - // Parse the token itself from the stored headers. - s.ID = r.Header.Get("X-Subject-Token") - - return &s, err -} - -// ExtractTokenID implements the gophercloud.AuthResult interface. The returned -// string is the same as the ID field of the Token struct returned from -// ExtractToken(). -func (r CreateResult) ExtractTokenID() (string, error) { - return r.Header.Get("X-Subject-Token"), r.Err -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along -// with the user's Token. -func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - var s ServiceCatalog - err := r.ExtractInto(&s) - return &s, err -} - -// ExtractUser returns the User that is the owner of the Token. -func (r commonResult) ExtractUser() (*User, error) { - var s struct { - User *User `json:"user"` - } - err := r.ExtractInto(&s) - return s.User, err -} - -// ExtractRoles returns Roles to which User is authorized. -func (r commonResult) ExtractRoles() ([]Role, error) { - var s struct { - Roles []Role `json:"roles"` - } - err := r.ExtractInto(&s) - return s.Roles, err -} - -// ExtractProject returns Project to which User is authorized. -func (r commonResult) ExtractProject() (*Project, error) { - var s struct { - Project *Project `json:"project"` - } - err := r.ExtractInto(&s) - return s.Project, err -} - -// CreateResult is the response from a Create request. Use ExtractToken() -// to interpret it as a Token, or ExtractServiceCatalog() to interpret it -// as a service catalog. -type CreateResult struct { - commonResult -} - -// GetResult is the response from a Get request. Use ExtractToken() -// to interpret it as a Token, or ExtractServiceCatalog() to interpret it -// as a service catalog. -type GetResult struct { - commonResult -} - -// RevokeResult is response from a Revoke request. -type RevokeResult struct { - commonResult -} - -// Token is a string that grants a user access to a controlled set of services -// in an OpenStack provider. Each Token is valid for a set length of time. -type Token struct { - // ID is the issued token. - ID string `json:"id"` - - // ExpiresAt is the timestamp at which this token will no longer be accepted. 
- ExpiresAt time.Time `json:"expires_at"` -} - -func (r commonResult) ExtractInto(v interface{}) error { - return r.ExtractIntoStructPtr(v, "token") -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go deleted file mode 100644 index 2f864a3..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -func tokenURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("auth", "tokens") -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go deleted file mode 100644 index 40080f7..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go +++ /dev/null @@ -1,28 +0,0 @@ -package utils - -import ( - "net/url" - "regexp" - "strings" -) - -// BaseEndpoint will return a URL without the /vX.Y -// portion of the URL. -func BaseEndpoint(endpoint string) (string, error) { - u, err := url.Parse(endpoint) - if err != nil { - return "", err - } - - u.RawQuery, u.Fragment = "", "" - - path := u.Path - versionRe := regexp.MustCompile("v[0-9.]+/?") - - if version := versionRe.FindString(path); version != "" { - versionIndex := strings.Index(path, version) - u.Path = path[:versionIndex] - } - - return u.String(), nil -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go deleted file mode 100644 index 27da19f..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go +++ /dev/null @@ -1,111 +0,0 @@ -package utils - -import ( - "fmt" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// Version is a supported API version, corresponding to a vN package within the appropriate service. -type Version struct { - ID string - Suffix string - Priority int -} - -var goodStatus = map[string]bool{ - "current": true, - "supported": true, - "stable": true, -} - -// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's -// published versions. -// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. -func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { - type linkResp struct { - Href string `json:"href"` - Rel string `json:"rel"` - } - - type valueResp struct { - ID string `json:"id"` - Status string `json:"status"` - Links []linkResp `json:"links"` - } - - type versionsResp struct { - Values []valueResp `json:"values"` - } - - type response struct { - Versions versionsResp `json:"versions"` - } - - normalize := func(endpoint string) string { - if !strings.HasSuffix(endpoint, "/") { - return endpoint + "/" - } - return endpoint - } - identityEndpoint := normalize(client.IdentityEndpoint) - - // If a full endpoint is specified, check version suffixes for a match first. 
- for _, v := range recognized { - if strings.HasSuffix(identityEndpoint, v.Suffix) { - return v, identityEndpoint, nil - } - } - - var resp response - _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ - JSONResponse: &resp, - OkCodes: []int{200, 300}, - }) - - if err != nil { - return nil, "", err - } - - var highest *Version - var endpoint string - - for _, value := range resp.Versions.Values { - href := "" - for _, link := range value.Links { - if link.Rel == "self" { - href = normalize(link.Href) - } - } - - for _, version := range recognized { - if strings.Contains(value.ID, version.ID) { - // Prefer a version that exactly matches the provided endpoint. - if href == identityEndpoint { - if href == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) - } - return version, href, nil - } - - // Otherwise, find the highest-priority version with a whitelisted status. - if goodStatus[strings.ToLower(value.Status)] { - if highest == nil || version.Priority > highest.Priority { - highest = version - endpoint = href - } - } - } - } - } - - if highest == nil { - return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) - } - if endpoint == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) - } - - return highest, endpoint, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/http.go deleted file mode 100644 index 757295c..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/http.go +++ /dev/null @@ -1,60 +0,0 @@ -package pagination - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// PageResult stores the HTTP response that returned the current page of results. -type PageResult struct { - gophercloud.Result - url.URL -} - -// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the -// results, interpreting it as JSON if the content type indicates. -func PageResultFrom(resp *http.Response) (PageResult, error) { - var parsedBody interface{} - - defer resp.Body.Close() - rawBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PageResult{}, err - } - - if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { - err = json.Unmarshal(rawBody, &parsedBody) - if err != nil { - return PageResult{}, err - } - } else { - parsedBody = rawBody - } - - return PageResultFromParsed(resp, parsedBody), err -} - -// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its -// body parsed as JSON (and closed). -func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { - return PageResult{ - Result: gophercloud.Result{ - Body: body, - Header: resp.Header, - }, - URL: *resp.Request.URL, - } -} - -// Request performs an HTTP request and extracts the http.Response from the result. 
-func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { - return client.Get(url, nil, &gophercloud.RequestOpts{ - MoreHeaders: headers, - OkCodes: []int{200, 204, 300}, - }) -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/linked.go deleted file mode 100644 index 3656fb7..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/linked.go +++ /dev/null @@ -1,92 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. -type LinkedPageBase struct { - PageResult - - // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. - // If any link along the path is missing, an empty URL will be returned. - // If any link results in an unexpected value type, an error will be returned. - // When left as "nil", []string{"links", "next"} will be used as a default. - LinkPath []string -} - -// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. -// It assumes that the links are available in a "links" element of the top-level response object. -// If this is not the case, override NextPageURL on your result type. -func (current LinkedPageBase) NextPageURL() (string, error) { - var path []string - var key string - - if current.LinkPath == nil { - path = []string{"links", "next"} - } else { - path = current.LinkPath - } - - submap, ok := current.Body.(map[string]interface{}) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return "", err - } - - for { - key, path = path[0], path[1:len(path)] - - value, ok := submap[key] - if !ok { - return "", nil - } - - if len(path) > 0 { - submap, ok = value.(map[string]interface{}) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) - return "", err - } - } else { - if value == nil { - // Actual null element. - return "", nil - } - - url, ok := value.(string) - if !ok { - err := gophercloud.ErrUnexpectedType{} - err.Expected = "string" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) - return "", err - } - - return url, nil - } - } -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current LinkedPageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the linked page's body. This method is needed to satisfy the -// Page interface. 
-func (current LinkedPageBase) GetBody() interface{} { - return current.Body -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/marker.go deleted file mode 100644 index 52e53ba..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/marker.go +++ /dev/null @@ -1,58 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager. -// For convenience, embed the MarkedPageBase struct. -type MarkerPage interface { - Page - - // LastMarker returns the last "marker" value on this page. - LastMarker() (string, error) -} - -// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters. -type MarkerPageBase struct { - PageResult - - // Owner is a reference to the embedding struct. - Owner MarkerPage -} - -// NextPageURL generates the URL for the page of results after this one. -func (current MarkerPageBase) NextPageURL() (string, error) { - currentURL := current.URL - - mark, err := current.Owner.LastMarker() - if err != nil { - return "", err - } - - q := currentURL.Query() - q.Set("marker", mark) - currentURL.RawQuery = q.Encode() - - return currentURL.String(), nil -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current MarkerPageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the linked page's body. This method is needed to satisfy the -// Page interface. -func (current MarkerPageBase) GetBody() interface{} { - return current.Body -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/pager.go deleted file mode 100644 index 42c0b2d..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/pager.go +++ /dev/null @@ -1,251 +0,0 @@ -package pagination - -import ( - "errors" - "fmt" - "net/http" - "reflect" - "strings" - - "github.com/gophercloud/gophercloud" -) - -var ( - // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. - ErrPageNotAvailable = errors.New("The requested page does not exist.") -) - -// Page must be satisfied by the result type of any resource collection. -// It allows clients to interact with the resource uniformly, regardless of whether or not or how it's paginated. -// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs, -// instead. -// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type -// will need to implement. -type Page interface { - // NextPageURL generates the URL for the page of data that follows this collection. - // Return "" if no such page exists. - NextPageURL() (string, error) - - // IsEmpty returns true if this Page has no items in it. - IsEmpty() (bool, error) - - // GetBody returns the Page Body. This is used in the `AllPages` method. 
- GetBody() interface{} -} - -// Pager knows how to advance through a specific resource collection, one page at a time. -type Pager struct { - client *gophercloud.ServiceClient - - initialURL string - - createPage func(r PageResult) Page - - firstPage Page - - Err error - - // Headers supplies additional HTTP headers to populate on each paged request. - Headers map[string]string -} - -// NewPager constructs a manually-configured pager. -// Supply the URL for the first page, a function that requests a specific page given a URL, and a function that counts a page. -func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager { - return Pager{ - client: client, - initialURL: initialURL, - createPage: createPage, - } -} - -// WithPageCreator returns a new Pager that substitutes a different page creation function. This is -// useful for overriding List functions in delegation. -func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager { - return Pager{ - client: p.client, - initialURL: p.initialURL, - createPage: createPage, - } -} - -func (p Pager) fetchNextPage(url string) (Page, error) { - resp, err := Request(p.client, p.Headers, url) - if err != nil { - return nil, err - } - - remembered, err := PageResultFrom(resp) - if err != nil { - return nil, err - } - - return p.createPage(remembered), nil -} - -// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function. -// Return "false" from the handler to prematurely stop iterating. -func (p Pager) EachPage(handler func(Page) (bool, error)) error { - if p.Err != nil { - return p.Err - } - currentURL := p.initialURL - for { - var currentPage Page - - // if first page has already been fetched, no need to fetch it again - if p.firstPage != nil { - currentPage = p.firstPage - p.firstPage = nil - } else { - var err error - currentPage, err = p.fetchNextPage(currentURL) - if err != nil { - return err - } - } - - empty, err := currentPage.IsEmpty() - if err != nil { - return err - } - if empty { - return nil - } - - ok, err := handler(currentPage) - if err != nil { - return err - } - if !ok { - return nil - } - - currentURL, err = currentPage.NextPageURL() - if err != nil { - return err - } - if currentURL == "" { - return nil - } - } -} - -// AllPages returns all the pages from a `List` operation in a single page, -// allowing the user to retrieve all the pages at once. -func (p Pager) AllPages() (Page, error) { - // pagesSlice holds all the pages until they get converted into as Page Body. - var pagesSlice []interface{} - // body will contain the final concatenated Page body. - var body reflect.Value - - // Grab a first page to ascertain the page body type. - firstPage, err := p.fetchNextPage(p.initialURL) - if err != nil { - return nil, err - } - // Store the page type so we can use reflection to create a new mega-page of - // that type. - pageType := reflect.TypeOf(firstPage) - - // if it's a single page, just return the firstPage (first page) - if _, found := pageType.FieldByName("SinglePageBase"); found { - return firstPage, nil - } - - // store the first page to avoid getting it twice - p.firstPage = firstPage - - // Switch on the page body type. Recognized types are `map[string]interface{}`, - // `[]byte`, and `[]interface{}`. - switch pb := firstPage.GetBody().(type) { - case map[string]interface{}: - // key is the map key for the page body if the body type is `map[string]interface{}`. 
- var key string - // Iterate over the pages to concatenate the bodies. - err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().(map[string]interface{}) - for k, v := range b { - // If it's a linked page, we don't want the `links`, we want the other one. - if !strings.HasSuffix(k, "links") { - // check the field's type. we only want []interface{} (which is really []map[string]interface{}) - switch vt := v.(type) { - case []interface{}: - key = k - pagesSlice = append(pagesSlice, vt...) - } - } - } - return true, nil - }) - if err != nil { - return nil, err - } - // Set body to value of type `map[string]interface{}` - body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice))) - body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice)) - case []byte: - // Iterate over the pages to concatenate the bodies. - err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().([]byte) - pagesSlice = append(pagesSlice, b) - // seperate pages with a comma - pagesSlice = append(pagesSlice, []byte{10}) - return true, nil - }) - if err != nil { - return nil, err - } - if len(pagesSlice) > 0 { - // Remove the trailing comma. - pagesSlice = pagesSlice[:len(pagesSlice)-1] - } - var b []byte - // Combine the slice of slices in to a single slice. - for _, slice := range pagesSlice { - b = append(b, slice.([]byte)...) - } - // Set body to value of type `bytes`. - body = reflect.New(reflect.TypeOf(b)).Elem() - body.SetBytes(b) - case []interface{}: - // Iterate over the pages to concatenate the bodies. - err = p.EachPage(func(page Page) (bool, error) { - b := page.GetBody().([]interface{}) - pagesSlice = append(pagesSlice, b...) - return true, nil - }) - if err != nil { - return nil, err - } - // Set body to value of type `[]interface{}` - body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice)) - for i, s := range pagesSlice { - body.Index(i).Set(reflect.ValueOf(s)) - } - default: - err := gophercloud.ErrUnexpectedType{} - err.Expected = "map[string]interface{}/[]byte/[]interface{}" - err.Actual = fmt.Sprintf("%T", pb) - return nil, err - } - - // Each `Extract*` function is expecting a specific type of page coming back, - // otherwise the type assertion in those functions will fail. pageType is needed - // to create a type in this method that has the same type that the `Extract*` - // function is expecting and set the Body of that object to the concatenated - // pages. - page := reflect.New(pageType) - // Set the page body to be the concatenated pages. - page.Elem().FieldByName("Body").Set(body) - // Set any additional headers that were pass along. The `objectstorage` pacakge, - // for example, passes a Content-Type header. - h := make(http.Header) - for k, v := range p.Headers { - h.Add(k, v) - } - page.Elem().FieldByName("Header").Set(reflect.ValueOf(h)) - // Type assert the page to a Page interface so that the type assertion in the - // `Extract*` methods will work. - return page.Elem().Interface().(Page), err -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go deleted file mode 100644 index 912daea..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs. 
-*/ -package pagination diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/single.go deleted file mode 100644 index 4251d64..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/pagination/single.go +++ /dev/null @@ -1,33 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once. -type SinglePageBase PageResult - -// NextPageURL always returns "" to indicate that there are no more pages to return. -func (current SinglePageBase) NextPageURL() (string, error) { - return "", nil -} - -// IsEmpty satisifies the IsEmpty method of the Page interface -func (current SinglePageBase) IsEmpty() (bool, error) { - if b, ok := current.Body.([]interface{}); ok { - return len(b) == 0, nil - } - err := gophercloud.ErrUnexpectedType{} - err.Expected = "[]interface{}" - err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) - return true, err -} - -// GetBody returns the single page's body. This method is needed to satisfy the -// Page interface. -func (current SinglePageBase) GetBody() interface{} { - return current.Body -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/params.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/params.go deleted file mode 100644 index b998666..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/params.go +++ /dev/null @@ -1,491 +0,0 @@ -package gophercloud - -import ( - "encoding/json" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -/* -BuildRequestBody builds a map[string]interface from the given `struct`. If -parent is not an empty string, the final map[string]interface returned will -encapsulate the built one. For example: - - disk := 1 - createOpts := flavors.CreateOpts{ - ID: "1", - Name: "m1.tiny", - Disk: &disk, - RAM: 512, - VCPUs: 1, - RxTxFactor: 1.0, - } - - body, err := gophercloud.BuildRequestBody(createOpts, "flavor") - -The above example can be run as-is, however it is recommended to look at how -BuildRequestBody is used within Gophercloud to more fully understand how it -fits within the request process as a whole rather than use it directly as shown -above. 
-*/ -func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]interface{}) - if optsValue.Kind() == reflect.Struct { - //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - - if f.Name != strings.Title(f.Name) { - //fmt.Printf("Skipping field: %s...\n", f.Name) - continue - } - - //fmt.Printf("Starting on field: %s...\n", f.Name) - - zero := isZero(v) - //fmt.Printf("v is zero?: %v\n", zero) - - // if the field has a required tag that's set to "true" - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) - // if the field's value is zero, return a missing-argument error - if zero { - // if the field has a 'required' tag, it can't have a zero-value - err := ErrMissingInput{} - err.Argument = f.Name - return nil, err - } - } - - if xorTag := f.Tag.Get("xor"); xorTag != "" { - //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) - xorField := optsValue.FieldByName(xorTag) - var xorFieldIsZero bool - if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { - xorFieldIsZero = true - } else { - if xorField.Kind() == reflect.Ptr { - xorField = xorField.Elem() - } - xorFieldIsZero = isZero(xorField) - } - if !(zero != xorFieldIsZero) { - err := ErrMissingInput{} - err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) - err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) - return nil, err - } - } - - if orTag := f.Tag.Get("or"); orTag != "" { - //fmt.Printf("Checking `or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) - //fmt.Printf("field is zero?: %v\n", zero) - if zero { - orField := optsValue.FieldByName(orTag) - var orFieldIsZero bool - if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { - orFieldIsZero = true - } else { - if orField.Kind() == reflect.Ptr { - orField = orField.Elem() - } - orFieldIsZero = isZero(orField) - } - if orFieldIsZero { - err := ErrMissingInput{} - err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) - err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) - return nil, err - } - } - } - - jsonTag := f.Tag.Get("json") - if jsonTag == "-" { - continue - } - - if v.Kind() == reflect.Slice || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice) { - sliceValue := v - if sliceValue.Kind() == reflect.Ptr { - sliceValue = sliceValue.Elem() - } - - for i := 0; i < sliceValue.Len(); i++ { - element := sliceValue.Index(i) - if element.Kind() == reflect.Struct || (element.Kind() == reflect.Ptr && element.Elem().Kind() == reflect.Struct) { - _, err := BuildRequestBody(element.Interface(), "") - if err != nil { - return nil, err - } - } - } - } - if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { - if zero { - //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) - if jsonTag != "" { - jsonTagPieces := strings.Split(jsonTag, ",") - if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { - if v.CanSet() { - if !v.IsNil() { - if v.Kind() == 
reflect.Ptr { - v.Set(reflect.Zero(v.Type())) - } - } - //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) - } - } - } - continue - } - - //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) - _, err := BuildRequestBody(v.Interface(), f.Name) - if err != nil { - return nil, err - } - } - } - - //fmt.Printf("opts: %+v \n", opts) - - b, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - //fmt.Printf("string(b): %s\n", string(b)) - - err = json.Unmarshal(b, &optsMap) - if err != nil { - return nil, err - } - - //fmt.Printf("optsMap: %+v\n", optsMap) - - if parent != "" { - optsMap = map[string]interface{}{parent: optsMap} - } - //fmt.Printf("optsMap after parent added: %+v\n", optsMap) - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") -} - -// EnabledState is a convenience type, mostly used in Create and Update -// operations. Because the zero value of a bool is FALSE, we need to use a -// pointer instead to indicate zero-ness. -type EnabledState *bool - -// Convenience vars for EnabledState values. -var ( - iTrue = true - iFalse = false - - Enabled EnabledState = &iTrue - Disabled EnabledState = &iFalse -) - -// IPVersion is a type for the possible IP address versions. Valid instances -// are IPv4 and IPv6 -type IPVersion int - -const ( - // IPv4 is used for IP version 4 addresses - IPv4 IPVersion = 4 - // IPv6 is used for IP version 6 addresses - IPv6 IPVersion = 6 -) - -// IntToPointer is a function for converting integers into integer pointers. -// This is useful when passing in options to operations. -func IntToPointer(i int) *int { - return &i -} - -/* -MaybeString is an internal function to be used by request methods in individual -resource packages. - -It takes a string that might be a zero value and returns either a pointer to its -address or nil. This is useful for allowing users to conveniently omit values -from an options struct by leaving them zeroed, but still pass nil to the JSON -serializer so they'll be omitted from the request body. -*/ -func MaybeString(original string) *string { - if original != "" { - return &original - } - return nil -} - -/* -MaybeInt is an internal function to be used by request methods in individual -resource packages. - -Like MaybeString, it accepts an int that may or may not be a zero value, and -returns either a pointer to its address or nil. It's intended to hint that the -JSON serializer should omit its field. 
-*/ -func MaybeInt(original int) *int { - if original != 0 { - return &original - } - return nil -} - -/* -func isUnderlyingStructZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Ptr: - return isUnderlyingStructZero(v.Elem()) - default: - return isZero(v) - } -} -*/ - -var t time.Time - -func isZero(v reflect.Value) bool { - //fmt.Printf("\n\nchecking isZero for value: %+v\n", v) - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - return true - } - return false - case reflect.Func, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - z := true - for i := 0; i < v.Len(); i++ { - z = z && isZero(v.Index(i)) - } - return z - case reflect.Struct: - if v.Type() == reflect.TypeOf(t) { - if v.Interface().(time.Time).IsZero() { - return true - } - return false - } - z := true - for i := 0; i < v.NumField(); i++ { - z = z && isZero(v.Field(i)) - } - return z - } - // Compare other types directly: - z := reflect.Zero(v.Type()) - //fmt.Printf("zero type for value: %+v\n\n\n", z) - return v.Interface() == z.Interface() -} - -/* -BuildQueryString is an internal function to be used by request methods in -individual resource packages. - -It accepts a tagged structure and expands it into a URL struct. Field names are -converted into query parameters based on a "q" tag. For example: - - type struct Something { - Bar string `q:"x_bar"` - Baz int `q:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into "?x_bar=AAA&lorem_ipsum=BBB". - -The struct's fields may be strings, integers, or boolean values. Fields left at -their type's zero value will be omitted from the query. -*/ -func BuildQueryString(opts interface{}) (*url.URL, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - params := url.Values{} - - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - qTag := f.Tag.Get("q") - - // if the field has a 'q' tag, it goes in the query string - if qTag != "" { - tags := strings.Split(qTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - loop: - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - goto loop - case reflect.String: - params.Add(tags[0], v.String()) - case reflect.Int: - params.Add(tags[0], strconv.FormatInt(v.Int(), 10)) - case reflect.Bool: - params.Add(tags[0], strconv.FormatBool(v.Bool())) - case reflect.Slice: - switch v.Type().Elem() { - case reflect.TypeOf(0): - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) - } - default: - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], v.Index(i).String()) - } - } - case reflect.Map: - if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { - var s []string - for _, k := range v.MapKeys() { - value := v.MapIndex(k).String() - s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value)) - } - params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", "))) - } - } - } else { - // if the field has a 'required' tag, it can't have a zero-value - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) - } - } - } - } - - return &url.URL{RawQuery: params.Encode()}, nil - } - // Return an error if the 
underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") -} - -/* -BuildHeaders is an internal function to be used by request methods in -individual resource packages. - -It accepts an arbitrary tagged structure and produces a string map that's -suitable for use as the HTTP headers of an outgoing request. Field names are -mapped to header names based in "h" tags. - - type struct Something { - Bar string `h:"x_bar"` - Baz int `h:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into: - - map[string]string{ - "x_bar": "AAA", - "lorem_ipsum": "BBB", - } - -Untagged fields and fields left at their zero values are skipped. Integers, -booleans and string values are supported. -*/ -func BuildHeaders(opts interface{}) (map[string]string, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]string) - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - hTag := f.Tag.Get("h") - - // if the field has a 'h' tag, it goes in the header - if hTag != "" { - tags := strings.Split(hTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - switch v.Kind() { - case reflect.String: - optsMap[tags[0]] = v.String() - case reflect.Int: - optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10) - case reflect.Bool: - optsMap[tags[0]] = strconv.FormatBool(v.Bool()) - } - } else { - // if the field has a 'required' tag, it can't have a zero-value - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) - } - } - } - - } - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return optsMap, fmt.Errorf("Options type is not a struct.") -} - -// IDSliceToQueryString takes a slice of elements and converts them into a query -// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the -// result would be `?name=20&name=40&name=60' -func IDSliceToQueryString(name string, ids []int) string { - str := "" - for k, v := range ids { - if k == 0 { - str += "?" - } else { - str += "&" - } - str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v)) - } - return str -} - -// IntWithinRange returns TRUE if an integer falls within a defined range, and -// FALSE if not. -func IntWithinRange(val, min, max int) bool { - return val > min && val < max -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/provider_client.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/provider_client.go deleted file mode 100644 index fce0046..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ /dev/null @@ -1,501 +0,0 @@ -package gophercloud - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "strings" - "sync" -) - -// DefaultUserAgent is the default User-Agent string set in the request header. -const DefaultUserAgent = "gophercloud/2.0.0" - -// UserAgent represents a User-Agent header. -type UserAgent struct { - // prepend is the slice of User-Agent strings to prepend to DefaultUserAgent. - // All the strings to prepend are accumulated and prepended in the Join method. 
- prepend []string -} - -// Prepend prepends a user-defined string to the default User-Agent string. Users -// may pass in one or more strings to prepend. -func (ua *UserAgent) Prepend(s ...string) { - ua.prepend = append(s, ua.prepend...) -} - -// Join concatenates all the user-defined User-Agend strings with the default -// Gophercloud User-Agent string. -func (ua *UserAgent) Join() string { - uaSlice := append(ua.prepend, DefaultUserAgent) - return strings.Join(uaSlice, " ") -} - -// ProviderClient stores details that are required to interact with any -// services within a specific provider's API. -// -// Generally, you acquire a ProviderClient by calling the NewClient method in -// the appropriate provider's child package, providing whatever authentication -// credentials are required. -type ProviderClient struct { - // IdentityBase is the base URL used for a particular provider's identity - // service - it will be used when issuing authenticatation requests. It - // should point to the root resource of the identity service, not a specific - // identity version. - IdentityBase string - - // IdentityEndpoint is the identity endpoint. This may be a specific version - // of the identity service. If this is the case, this endpoint is used rather - // than querying versions first. - IdentityEndpoint string - - // TokenID is the ID of the most recently issued valid token. - // NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application. - // To safely read or write this value, call `Token` or `SetToken`, respectively - TokenID string - - // EndpointLocator describes how this provider discovers the endpoints for - // its constituent services. - EndpointLocator EndpointLocator - - // HTTPClient allows users to interject arbitrary http, https, or other transit behaviors. - HTTPClient http.Client - - // UserAgent represents the User-Agent header in the HTTP request. - UserAgent UserAgent - - // ReauthFunc is the function used to re-authenticate the user if the request - // fails with a 401 HTTP response code. This a needed because there may be multiple - // authentication functions for different Identity service versions. - ReauthFunc func() error - - // Throwaway determines whether if this client is a throw-away client. It's a copy of user's provider client - // with the token and reauth func zeroed. Such client can be used to perform reauthorization. - Throwaway bool - - // Context is the context passed to the HTTP request. - Context context.Context - - // mut is a mutex for the client. It protects read and write access to client attributes such as getting - // and setting the TokenID. - mut *sync.RWMutex - - // reauthmut is a mutex for reauthentication it attempts to ensure that only one reauthentication - // attempt happens at one time. - reauthmut *reauthlock - - authResult AuthResult -} - -// reauthlock represents a set of attributes used to help in the reauthentication process. -type reauthlock struct { - sync.RWMutex - reauthing bool - reauthingErr error - done *sync.Cond -} - -// AuthenticatedHeaders returns a map of HTTP headers that are common for all -// authenticated service requests. Blocks if Reauthenticate is in progress. 
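A brief, hedged sketch of wiring up a ProviderClient by hand with the fields and UserAgent helpers shown above. In practice an authentication helper usually populates the token; the endpoint, token handling, and client name here are illustrative only:

    package example

    import "github.com/gophercloud/gophercloud"

    // newProviderClient builds a bare ProviderClient. The endpoint and token are
    // placeholders; normally an authentication helper fills these in.
    func newProviderClient(identityEndpoint, token string) *gophercloud.ProviderClient {
        pc := &gophercloud.ProviderClient{
            IdentityEndpoint: identityEndpoint,
        }
        // Prefix the default "gophercloud/2.0.0" User-Agent with the caller's name.
        pc.UserAgent.Prepend("example-client/0.1")
        // Guard TokenID with a mutex so concurrent requests and re-auth are safe.
        pc.UseTokenLock()
        pc.SetToken(token)
        return pc
    }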
-func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) { - if client.IsThrowaway() { - return - } - if client.reauthmut != nil { - client.reauthmut.Lock() - for client.reauthmut.reauthing { - client.reauthmut.done.Wait() - } - client.reauthmut.Unlock() - } - t := client.Token() - if t == "" { - return - } - return map[string]string{"X-Auth-Token": t} -} - -// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token. -// If the application's ProviderClient is not used concurrently, this doesn't need to be called. -func (client *ProviderClient) UseTokenLock() { - client.mut = new(sync.RWMutex) - client.reauthmut = new(reauthlock) -} - -// GetAuthResult returns the result from the request that was used to obtain a -// provider client's Keystone token. -// -// The result is nil when authentication has not yet taken place, when the token -// was set manually with SetToken(), or when a ReauthFunc was used that does not -// record the AuthResult. -func (client *ProviderClient) GetAuthResult() AuthResult { - if client.mut != nil { - client.mut.RLock() - defer client.mut.RUnlock() - } - return client.authResult -} - -// Token safely reads the value of the auth token from the ProviderClient. Applications should -// call this method to access the token instead of the TokenID field -func (client *ProviderClient) Token() string { - if client.mut != nil { - client.mut.RLock() - defer client.mut.RUnlock() - } - return client.TokenID -} - -// SetToken safely sets the value of the auth token in the ProviderClient. Applications may -// use this method in a custom ReauthFunc. -// -// WARNING: This function is deprecated. Use SetTokenAndAuthResult() instead. -func (client *ProviderClient) SetToken(t string) { - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - client.TokenID = t - client.authResult = nil -} - -// SetTokenAndAuthResult safely sets the value of the auth token in the -// ProviderClient and also records the AuthResult that was returned from the -// token creation request. Applications may call this in a custom ReauthFunc. -func (client *ProviderClient) SetTokenAndAuthResult(r AuthResult) error { - tokenID := "" - var err error - if r != nil { - tokenID, err = r.ExtractTokenID() - if err != nil { - return err - } - } - - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - client.TokenID = tokenID - client.authResult = r - return nil -} - -// CopyTokenFrom safely copies the token from another ProviderClient into the -// this one. -func (client *ProviderClient) CopyTokenFrom(other *ProviderClient) { - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - if other.mut != nil && other.mut != client.mut { - other.mut.RLock() - defer other.mut.RUnlock() - } - client.TokenID = other.TokenID - client.authResult = other.authResult -} - -// IsThrowaway safely reads the value of the client Throwaway field. -func (client *ProviderClient) IsThrowaway() bool { - if client.reauthmut != nil { - client.reauthmut.RLock() - defer client.reauthmut.RUnlock() - } - return client.Throwaway -} - -// SetThrowaway safely sets the value of the client Throwaway field. -func (client *ProviderClient) SetThrowaway(v bool) { - if client.reauthmut != nil { - client.reauthmut.Lock() - defer client.reauthmut.Unlock() - } - client.Throwaway = v -} - -// Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is -// called because of a 401 response, the caller may pass the previous token. 
In -// this case, the reauthentication can be skipped if another thread has already -// reauthenticated in the meantime. If no previous token is known, an empty -// string should be passed instead to force unconditional reauthentication. -func (client *ProviderClient) Reauthenticate(previousToken string) (err error) { - if client.ReauthFunc == nil { - return nil - } - - if client.reauthmut == nil { - return client.ReauthFunc() - } - - client.reauthmut.Lock() - if client.reauthmut.reauthing { - for !client.reauthmut.reauthing { - client.reauthmut.done.Wait() - } - err = client.reauthmut.reauthingErr - client.reauthmut.Unlock() - return err - } - client.reauthmut.Unlock() - - client.reauthmut.Lock() - client.reauthmut.reauthing = true - client.reauthmut.done = sync.NewCond(client.reauthmut) - client.reauthmut.reauthingErr = nil - client.reauthmut.Unlock() - - if previousToken == "" || client.TokenID == previousToken { - err = client.ReauthFunc() - } - - client.reauthmut.Lock() - client.reauthmut.reauthing = false - client.reauthmut.reauthingErr = err - client.reauthmut.done.Broadcast() - client.reauthmut.Unlock() - return -} - -// RequestOpts customizes the behavior of the provider.Request() method. -type RequestOpts struct { - // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The - // content type of the request will default to "application/json" unless overridden by MoreHeaders. - // It's an error to specify both a JSONBody and a RawBody. - JSONBody interface{} - // RawBody contains an io.Reader that will be consumed by the request directly. No content-type - // will be set unless one is provided explicitly by MoreHeaders. - RawBody io.Reader - // JSONResponse, if provided, will be populated with the contents of the response body parsed as - // JSON. - JSONResponse interface{} - // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If - // the response has a different code, an error will be returned. - OkCodes []int - // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is - // provided with a blank value (""), that header will be *omitted* instead: use this to suppress - // the default Accept header or an inferred Content-Type, for example. - MoreHeaders map[string]string - // ErrorContext specifies the resource error type to return if an error is encountered. - // This lets resources override default error messages based on the response status code. - ErrorContext error -} - -var applicationJSON = "application/json" - -// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication -// header will automatically be provided. -func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { - var body io.Reader - var contentType *string - - // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided - // io.ReadSeeker as-is. Default the content-type to application/json. - if options.JSONBody != nil { - if options.RawBody != nil { - return nil, errors.New("please provide only one of JSONBody or RawBody to gophercloud.Request()") - } - - rendered, err := json.Marshal(options.JSONBody) - if err != nil { - return nil, err - } - - body = bytes.NewReader(rendered) - contentType = &applicationJSON - } - - if options.RawBody != nil { - body = options.RawBody - } - - // Construct the http.Request. 
- req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - if client.Context != nil { - req = req.WithContext(client.Context) - } - - // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to - // modify or omit any header. - if contentType != nil { - req.Header.Set("Content-Type", *contentType) - } - req.Header.Set("Accept", applicationJSON) - - // Set the User-Agent header - req.Header.Set("User-Agent", client.UserAgent.Join()) - - if options.MoreHeaders != nil { - for k, v := range options.MoreHeaders { - if v != "" { - req.Header.Set(k, v) - } else { - req.Header.Del(k) - } - } - } - - // get latest token from client - for k, v := range client.AuthenticatedHeaders() { - req.Header.Set(k, v) - } - - // Set connection parameter to close the connection immediately when we've got the response - req.Close = true - - prereqtok := req.Header.Get("X-Auth-Token") - - // Issue the request. - resp, err := client.HTTPClient.Do(req) - if err != nil { - return nil, err - } - - // Allow default OkCodes if none explicitly set - okc := options.OkCodes - if okc == nil { - okc = defaultOkCodes(method) - } - - // Validate the HTTP response status. - var ok bool - for _, code := range okc { - if resp.StatusCode == code { - ok = true - break - } - } - - if !ok { - body, _ := ioutil.ReadAll(resp.Body) - resp.Body.Close() - respErr := ErrUnexpectedResponseCode{ - URL: url, - Method: method, - Expected: options.OkCodes, - Actual: resp.StatusCode, - Body: body, - } - - errType := options.ErrorContext - switch resp.StatusCode { - case http.StatusBadRequest: - err = ErrDefault400{respErr} - if error400er, ok := errType.(Err400er); ok { - err = error400er.Error400(respErr) - } - case http.StatusUnauthorized: - if client.ReauthFunc != nil { - err = client.Reauthenticate(prereqtok) - if err != nil { - e := &ErrUnableToReauthenticate{} - e.ErrOriginal = respErr - return nil, e - } - if options.RawBody != nil { - if seeker, ok := options.RawBody.(io.Seeker); ok { - seeker.Seek(0, 0) - } - } - resp, err = client.Request(method, url, options) - if err != nil { - switch err.(type) { - case *ErrUnexpectedResponseCode: - e := &ErrErrorAfterReauthentication{} - e.ErrOriginal = err.(*ErrUnexpectedResponseCode) - return nil, e - default: - e := &ErrErrorAfterReauthentication{} - e.ErrOriginal = err - return nil, e - } - } - return resp, nil - } - err = ErrDefault401{respErr} - if error401er, ok := errType.(Err401er); ok { - err = error401er.Error401(respErr) - } - case http.StatusForbidden: - err = ErrDefault403{respErr} - if error403er, ok := errType.(Err403er); ok { - err = error403er.Error403(respErr) - } - case http.StatusNotFound: - err = ErrDefault404{respErr} - if error404er, ok := errType.(Err404er); ok { - err = error404er.Error404(respErr) - } - case http.StatusMethodNotAllowed: - err = ErrDefault405{respErr} - if error405er, ok := errType.(Err405er); ok { - err = error405er.Error405(respErr) - } - case http.StatusRequestTimeout: - err = ErrDefault408{respErr} - if error408er, ok := errType.(Err408er); ok { - err = error408er.Error408(respErr) - } - case http.StatusConflict: - err = ErrDefault409{respErr} - if error409er, ok := errType.(Err409er); ok { - err = error409er.Error409(respErr) - } - case 429: - err = ErrDefault429{respErr} - if error429er, ok := errType.(Err429er); ok { - err = error429er.Error429(respErr) - } - case http.StatusInternalServerError: - err = ErrDefault500{respErr} - if error500er, ok := errType.(Err500er); ok { - err = 
error500er.Error500(respErr) - } - case http.StatusServiceUnavailable: - err = ErrDefault503{respErr} - if error503er, ok := errType.(Err503er); ok { - err = error503er.Error503(respErr) - } - } - - if err == nil { - err = respErr - } - - return resp, err - } - - // Parse the response body as JSON, if requested to do so. - if options.JSONResponse != nil { - defer resp.Body.Close() - if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { - return nil, err - } - } - - return resp, nil -} - -func defaultOkCodes(method string) []int { - switch { - case method == "GET": - return []int{200} - case method == "POST": - return []int{201, 202} - case method == "PUT": - return []int{201, 202} - case method == "PATCH": - return []int{200, 202, 204} - case method == "DELETE": - return []int{202, 204} - } - - return []int{} -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/results.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/results.go deleted file mode 100644 index 94a16bf..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/results.go +++ /dev/null @@ -1,448 +0,0 @@ -package gophercloud - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "reflect" - "strconv" - "time" -) - -/* -Result is an internal type to be used by individual resource packages, but its -methods will be available on a wide variety of user-facing embedding types. - -It acts as a base struct that other Result types, returned from request -functions, can embed for convenience. All Results capture basic information -from the HTTP transaction that was performed, including the response body, -HTTP headers, and any errors that happened. - -Generally, each Result type will have an Extract method that can be used to -further interpret the result's payload in a specific context. Extensions or -providers can then provide additional extraction functions to pull out -provider- or extension-specific information as well. -*/ -type Result struct { - // Body is the payload of the HTTP response from the server. In most cases, - // this will be the deserialized JSON structure. - Body interface{} - - // Header contains the HTTP header structure from the original response. - Header http.Header - - // Err is an error that occurred during the operation. It's deferred until - // extraction to make it easier to chain the Extract call. - Err error -} - -// ExtractInto allows users to provide an object into which `Extract` will extract -// the `Result.Body`. This would be useful for OpenStack providers that have -// different fields in the response object than OpenStack proper. 
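For reference, the Extract pattern described above usually looks like the following in a resource package. GetResult, Widget, and the "widget" envelope key are hypothetical names, not part of this codebase:

    package widgets

    import "github.com/gophercloud/gophercloud"

    // GetResult is the wrapper a resource package would declare around
    // gophercloud.Result for a single GET call.
    type GetResult struct {
        gophercloud.Result
    }

    // Widget is a hypothetical resource shape.
    type Widget struct {
        ID   string `json:"id"`
        Name string `json:"name"`
    }

    // Extract interprets the response body, which OpenStack-style APIs usually
    // wrap in an envelope such as {"widget": {...}}.
    func (r GetResult) Extract() (*Widget, error) {
        var s struct {
            Widget *Widget `json:"widget"`
        }
        err := r.ExtractInto(&s)
        return s.Widget, err
    }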
-func (r Result) ExtractInto(to interface{}) error { - if r.Err != nil { - return r.Err - } - - if reader, ok := r.Body.(io.Reader); ok { - if readCloser, ok := reader.(io.Closer); ok { - defer readCloser.Close() - } - return json.NewDecoder(reader).Decode(to) - } - - b, err := json.Marshal(r.Body) - if err != nil { - return err - } - err = json.Unmarshal(b, to) - - return err -} - -func (r Result) extractIntoPtr(to interface{}, label string) error { - if label == "" { - return r.ExtractInto(&to) - } - - var m map[string]interface{} - err := r.ExtractInto(&m) - if err != nil { - return err - } - - b, err := json.Marshal(m[label]) - if err != nil { - return err - } - - toValue := reflect.ValueOf(to) - if toValue.Kind() == reflect.Ptr { - toValue = toValue.Elem() - } - - switch toValue.Kind() { - case reflect.Slice: - typeOfV := toValue.Type().Elem() - if typeOfV.Kind() == reflect.Struct { - if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { - newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) - - if mSlice, ok := m[label].([]interface{}); ok { - for _, v := range mSlice { - // For each iteration of the slice, we create a new struct. - // This is to work around a bug where elements of a slice - // are reused and not overwritten when the same copy of the - // struct is used: - // - // https://github.com/golang/go/issues/21092 - // https://github.com/golang/go/issues/24155 - // https://play.golang.org/p/NHo3ywlPZli - newType := reflect.New(typeOfV).Elem() - - b, err := json.Marshal(v) - if err != nil { - return err - } - - // This is needed for structs with an UnmarshalJSON method. - // Technically this is just unmarshalling the response into - // a struct that is never used, but it's good enough to - // trigger the UnmarshalJSON method. - for i := 0; i < newType.NumField(); i++ { - s := newType.Field(i).Addr().Interface() - - // Unmarshal is used rather than NewDecoder to also work - // around the above-mentioned bug. - err = json.Unmarshal(b, s) - if err != nil { - return err - } - } - - newSlice = reflect.Append(newSlice, newType) - } - } - - // "to" should now be properly modeled to receive the - // JSON response body and unmarshal into all the correct - // fields of the struct or composed extension struct - // at the end of this method. - toValue.Set(newSlice) - } - } - case reflect.Struct: - typeOfV := toValue.Type() - if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { - for i := 0; i < toValue.NumField(); i++ { - toField := toValue.Field(i) - if toField.Kind() == reflect.Struct { - s := toField.Addr().Interface() - err = json.NewDecoder(bytes.NewReader(b)).Decode(s) - if err != nil { - return err - } - } - } - } - } - - err = json.Unmarshal(b, &to) - return err -} - -// ExtractIntoStructPtr will unmarshal the Result (r) into the provided -// interface{} (to). -// -// NOTE: For internal use only -// -// `to` must be a pointer to an underlying struct type -// -// If provided, `label` will be filtered out of the response -// body prior to `r` being unmarshalled into `to`. -func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { - if r.Err != nil { - return r.Err - } - - t := reflect.TypeOf(to) - if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) - } - switch t.Elem().Kind() { - case reflect.Struct: - return r.extractIntoPtr(to, label) - default: - return fmt.Errorf("Expected pointer to struct, got: %v", t) - } -} - -// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided -// interface{} (to). 
-// -// NOTE: For internal use only -// -// `to` must be a pointer to an underlying slice type -// -// If provided, `label` will be filtered out of the response -// body prior to `r` being unmarshalled into `to`. -func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { - if r.Err != nil { - return r.Err - } - - t := reflect.TypeOf(to) - if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) - } - switch t.Elem().Kind() { - case reflect.Slice: - return r.extractIntoPtr(to, label) - default: - return fmt.Errorf("Expected pointer to slice, got: %v", t) - } -} - -// PrettyPrintJSON creates a string containing the full response body as -// pretty-printed JSON. It's useful for capturing test fixtures and for -// debugging extraction bugs. If you include its output in an issue related to -// a buggy extraction function, we will all love you forever. -func (r Result) PrettyPrintJSON() string { - pretty, err := json.MarshalIndent(r.Body, "", " ") - if err != nil { - panic(err.Error()) - } - return string(pretty) -} - -// ErrResult is an internal type to be used by individual resource packages, but -// its methods will be available on a wide variety of user-facing embedding -// types. -// -// It represents results that only contain a potential error and -// nothing else. Usually, if the operation executed successfully, the Err field -// will be nil; otherwise it will be stocked with a relevant error. Use the -// ExtractErr method -// to cleanly pull it out. -type ErrResult struct { - Result -} - -// ExtractErr is a function that extracts error information, or nil, from a result. -func (r ErrResult) ExtractErr() error { - return r.Err -} - -/* -HeaderResult is an internal type to be used by individual resource packages, but -its methods will be available on a wide variety of user-facing embedding types. - -It represents a result that only contains an error (possibly nil) and an -http.Header. This is used, for example, by the objectstorage packages in -openstack, because most of the operations don't return response bodies, but do -have relevant information in headers. -*/ -type HeaderResult struct { - Result -} - -// ExtractInto allows users to provide an object into which `Extract` will -// extract the http.Header headers of the result. -func (r HeaderResult) ExtractInto(to interface{}) error { - if r.Err != nil { - return r.Err - } - - tmpHeaderMap := map[string]string{} - for k, v := range r.Header { - if len(v) > 0 { - tmpHeaderMap[k] = v[0] - } - } - - b, err := json.Marshal(tmpHeaderMap) - if err != nil { - return err - } - err = json.Unmarshal(b, to) - - return err -} - -// RFC3339Milli describes a common time format used by some API responses. 
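The labelled Extract*Ptr helpers above are the usual shortcut for the same envelope stripping. A hedged sketch, in which the Server type and the "server" key are purely illustrative:

    package servers

    import "github.com/gophercloud/gophercloud"

    // Server is an illustrative resource shape.
    type Server struct {
        ID   string `json:"id"`
        Name string `json:"name"`
    }

    // extract strips the {"server": {...}} envelope and unmarshals the payload
    // directly into a Server, without declaring an intermediate envelope struct.
    func extract(r gophercloud.Result) (Server, error) {
        var s Server
        err := r.ExtractIntoStructPtr(&s, "server")
        return s, err
    }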
-const RFC3339Milli = "2006-01-02T15:04:05.999999Z" - -type JSONRFC3339Milli time.Time - -func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error { - b := bytes.NewBuffer(data) - dec := json.NewDecoder(b) - var s string - if err := dec.Decode(&s); err != nil { - return err - } - t, err := time.Parse(RFC3339Milli, s) - if err != nil { - return err - } - *jt = JSONRFC3339Milli(t) - return nil -} - -const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999" - -type JSONRFC3339MilliNoZ time.Time - -func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339MilliNoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339MilliNoZ(t) - return nil -} - -type JSONRFC1123 time.Time - -func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - return err - } - *jt = JSONRFC1123(t) - return nil -} - -type JSONUnix time.Time - -func (jt *JSONUnix) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - unix, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - t = time.Unix(unix, 0) - *jt = JSONUnix(t) - return nil -} - -// RFC3339NoZ is the time format used in Heat (Orchestration). -const RFC3339NoZ = "2006-01-02T15:04:05" - -type JSONRFC3339NoZ time.Time - -func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339NoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339NoZ(t) - return nil -} - -// RFC3339ZNoT is the time format used in Zun (Containers Service). -const RFC3339ZNoT = "2006-01-02 15:04:05-07:00" - -type JSONRFC3339ZNoT time.Time - -func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339ZNoT, s) - if err != nil { - return err - } - *jt = JSONRFC3339ZNoT(t) - return nil -} - -// RFC3339ZNoTNoZ is another time format used in Zun (Containers Service). -const RFC3339ZNoTNoZ = "2006-01-02 15:04:05" - -type JSONRFC3339ZNoTNoZ time.Time - -func (jt *JSONRFC3339ZNoTNoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339ZNoTNoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339ZNoTNoZ(t) - return nil -} - -/* -Link is an internal type to be used in packages of collection resources that are -paginated in a certain way. - -It's a response substructure common to many paginated collection results that is -used to point to related pages. Usually, the one we care about is the one with -Rel field set to "next". -*/ -type Link struct { - Href string `json:"href"` - Rel string `json:"rel"` -} - -/* -ExtractNextURL is an internal function useful for packages of collection -resources that are paginated in a certain way. - -It attempts to extract the "next" URL from slice of Link structs, or -"" if no such URL is present. 
-*/ -func ExtractNextURL(links []Link) (string, error) { - var url string - - for _, l := range links { - if l.Rel == "next" { - url = l.Href - } - } - - if url == "" { - return "", nil - } - - return url, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/service_client.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/service_client.go deleted file mode 100644 index f222f05..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/service_client.go +++ /dev/null @@ -1,154 +0,0 @@ -package gophercloud - -import ( - "io" - "net/http" - "strings" -) - -// ServiceClient stores details required to interact with a specific service API implemented by a provider. -// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. -type ServiceClient struct { - // ProviderClient is a reference to the provider that implements this service. - *ProviderClient - - // Endpoint is the base URL of the service's API, acquired from a service catalog. - // It MUST end with a /. - Endpoint string - - // ResourceBase is the base URL shared by the resources within a service's API. It should include - // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used - // as-is, instead. - ResourceBase string - - // This is the service client type (e.g. compute, sharev2). - // NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS. - // It is only exported because it gets set in a different package. - Type string - - // The microversion of the service to use. Set this to use a particular microversion. - Microversion string - - // MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way, - // values set in this field will be set on all the HTTP requests the service client sends. - MoreHeaders map[string]string -} - -// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. -func (client *ServiceClient) ResourceBaseURL() string { - if client.ResourceBase != "" { - return client.ResourceBase - } - return client.Endpoint -} - -// ServiceURL constructs a URL for a resource belonging to this provider. -func (client *ServiceClient) ServiceURL(parts ...string) string { - return client.ResourceBaseURL() + strings.Join(parts, "/") -} - -func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { - if v, ok := (JSONBody).(io.Reader); ok { - opts.RawBody = v - } else if JSONBody != nil { - opts.JSONBody = JSONBody - } - - if JSONResponse != nil { - opts.JSONResponse = JSONResponse - } - - if opts.MoreHeaders == nil { - opts.MoreHeaders = make(map[string]string) - } - - if client.Microversion != "" { - client.setMicroversionHeader(opts) - } -} - -// Get calls `Request` with the "GET" HTTP verb. -func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, JSONResponse, opts) - return client.Request("GET", url, opts) -} - -// Post calls `Request` with the "POST" HTTP verb. -func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("POST", url, opts) -} - -// Put calls `Request` with the "PUT" HTTP verb. 
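A short sketch of how the ServiceClient verb helpers above are typically called. The "widgets" path and the OkCodes choice are illustrative assumptions:

    package widgets

    import "github.com/gophercloud/gophercloud"

    // get issues a GET for one widget and decodes the JSON response body into out.
    func get(client *gophercloud.ServiceClient, id string, out interface{}) error {
        url := client.ServiceURL("widgets", id)
        _, err := client.Get(url, out, &gophercloud.RequestOpts{
            // Anything other than 200 is turned into an error by the provider client.
            OkCodes: []int{200},
        })
        return err
    }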
-func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("PUT", url, opts) -} - -// Patch calls `Request` with the "PATCH" HTTP verb. -func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("PATCH", url, opts) -} - -// Delete calls `Request` with the "DELETE" HTTP verb. -func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, nil, opts) - return client.Request("DELETE", url, opts) -} - -// Head calls `Request` with the "HEAD" HTTP verb. -func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, nil, opts) - return client.Request("HEAD", url, opts) -} - -func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { - switch client.Type { - case "compute": - opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion - case "sharev2": - opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion - case "volume": - opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion - case "baremetal": - opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion - case "baremetal-introspection": - opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion - } - - if client.Type != "" { - opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion - } -} - -// Request carries out the HTTP operation for the service client -func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { - if len(client.MoreHeaders) > 0 { - if options == nil { - options = new(RequestOpts) - } - for k, v := range client.MoreHeaders { - options.MoreHeaders[k] = v - } - } - return client.ProviderClient.Request(method, url, options) -} diff --git a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/util.go b/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/util.go deleted file mode 100644 index 68f9a5d..0000000 --- a/cmd/bpa-operator/vendor/github.com/gophercloud/gophercloud/util.go +++ /dev/null @@ -1,102 +0,0 @@ -package gophercloud - -import ( - "fmt" - "net/url" - "path/filepath" - "strings" - "time" -) - -// WaitFor polls a predicate function, once per second, up to a timeout limit. -// This is useful to wait for a resource to transition to a certain state. -// To handle situations when the predicate might hang indefinitely, the -// predicate will be prematurely cancelled after the timeout. -// Resource packages will wrap this in a more convenient function that's -// specific to a certain resource, but it can also be useful on its own. -func WaitFor(timeout int, predicate func() (bool, error)) error { - type WaitForResult struct { - Success bool - Error error - } - - start := time.Now().Unix() - - for { - // If a timeout is set, and that's been exceeded, shut it down. 
- if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { - return fmt.Errorf("A timeout occurred") - } - - time.Sleep(1 * time.Second) - - var result WaitForResult - ch := make(chan bool, 1) - go func() { - defer close(ch) - satisfied, err := predicate() - result.Success = satisfied - result.Error = err - }() - - select { - case <-ch: - if result.Error != nil { - return result.Error - } - if result.Success { - return nil - } - // If the predicate has not finished by the timeout, cancel it. - case <-time.After(time.Duration(timeout) * time.Second): - return fmt.Errorf("A timeout occurred") - } - } -} - -// NormalizeURL is an internal function to be used by provider clients. -// -// It ensures that each endpoint URL has a closing `/`, as expected by -// ServiceClient's methods. -func NormalizeURL(url string) string { - if !strings.HasSuffix(url, "/") { - return url + "/" - } - return url -} - -// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as -// a reference in the filesystem, if necessary. basePath is assumed to contain -// either '.' when first used, or the file:// type fqdn of the parent resource. -// e.g. myFavScript.yaml => file://opt/lib/myFavScript.yaml -func NormalizePathURL(basePath, rawPath string) (string, error) { - u, err := url.Parse(rawPath) - if err != nil { - return "", err - } - // if a scheme is defined, it must be a fqdn already - if u.Scheme != "" { - return u.String(), nil - } - // if basePath is a url, then child resources are assumed to be relative to it - bu, err := url.Parse(basePath) - if err != nil { - return "", err - } - var basePathSys, absPathSys string - if bu.Scheme != "" { - basePathSys = filepath.FromSlash(bu.Path) - absPathSys = filepath.Join(basePathSys, rawPath) - bu.Path = filepath.ToSlash(absPathSys) - return bu.String(), nil - } - - absPathSys = filepath.Join(basePath, rawPath) - u.Path = filepath.ToSlash(absPathSys) - if err != nil { - return "", err - } - u.Scheme = "file" - return u.String(), nil - -} diff --git a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/.travis.yml b/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/.travis.yml deleted file mode 100644 index b5ffbe0..0000000 --- a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . - - go test -v -race ./... 
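For reference, the WaitFor helper removed just above (gophercloud's util.go) was typically wrapped per resource along these lines. The status getter and the "ACTIVE" value are illustrative, not taken from this repository:

    package example

    import "github.com/gophercloud/gophercloud"

    // waitForActive polls getStatus once per second until it reports "ACTIVE",
    // an error occurs, or 300 seconds elapse.
    func waitForActive(getStatus func() (string, error)) error {
        return gophercloud.WaitFor(300, func() (bool, error) {
            status, err := getStatus()
            if err != nil {
                return false, err
            }
            return status == "ACTIVE", nil
        })
    }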
diff --git a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/LICENSE.txt b/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/LICENSE.txt deleted file mode 100644 index 81316be..0000000 --- a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/README.md b/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/README.md deleted file mode 100644 index 09c9e7c..0000000 --- a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/README.md +++ /dev/null @@ -1,25 +0,0 @@ -httpcache -========= - -[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) - -Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. - -It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). - -Cache Backends --------------- - -- The built-in 'memory' cache stores responses in an in-memory map. -- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. -- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. -- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. -- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). -- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. -- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. 
-- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). - -License -------- - -- [MIT License](LICENSE.txt) diff --git a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go deleted file mode 100644 index 42e3129..0000000 --- a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package -// to supplement an in-memory map with persistent storage -// -package diskcache - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "github.com/peterbourgon/diskv" - "io" -) - -// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage -type Cache struct { - d *diskv.Diskv -} - -// Get returns the response corresponding to key if present -func (c *Cache) Get(key string) (resp []byte, ok bool) { - key = keyToFilename(key) - resp, err := c.d.Read(key) - if err != nil { - return []byte{}, false - } - return resp, true -} - -// Set saves a response to the cache as key -func (c *Cache) Set(key string, resp []byte) { - key = keyToFilename(key) - c.d.WriteStream(key, bytes.NewReader(resp), true) -} - -// Delete removes the response with key from the cache -func (c *Cache) Delete(key string) { - key = keyToFilename(key) - c.d.Erase(key) -} - -func keyToFilename(key string) string { - h := md5.New() - io.WriteString(h, key) - return hex.EncodeToString(h.Sum(nil)) -} - -// New returns a new Cache that will store files in basePath -func New(basePath string) *Cache { - return &Cache{ - d: diskv.New(diskv.Options{ - BasePath: basePath, - CacheSizeMax: 100 * 1024 * 1024, // 100MB - }), - } -} - -// NewWithDiskv returns a new Cache using the provided Diskv as underlying -// storage. -func NewWithDiskv(d *diskv.Diskv) *Cache { - return &Cache{d} -} diff --git a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/httpcache.go b/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/httpcache.go deleted file mode 100644 index f6a2ec4..0000000 --- a/cmd/bpa-operator/vendor/github.com/gregjones/httpcache/httpcache.go +++ /dev/null @@ -1,551 +0,0 @@ -// Package httpcache provides a http.RoundTripper implementation that works as a -// mostly RFC-compliant cache for http responses. -// -// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client -// and not for a shared proxy). -// -package httpcache - -import ( - "bufio" - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "strings" - "sync" - "time" -) - -const ( - stale = iota - fresh - transparent - // XFromCache is the header added to responses that are returned from the cache - XFromCache = "X-From-Cache" -) - -// A Cache interface is used by the Transport to store and retrieve responses. -type Cache interface { - // Get returns the []byte representation of a cached response and a bool - // set to true if the value isn't empty - Get(key string) (responseBytes []byte, ok bool) - // Set stores the []byte representation of a response against a key - Set(key string, responseBytes []byte) - // Delete removes the value associated with the key - Delete(key string) -} - -// cacheKey returns the cache key for req. 
-func cacheKey(req *http.Request) string { - if req.Method == http.MethodGet { - return req.URL.String() - } else { - return req.Method + " " + req.URL.String() - } -} - -// CachedResponse returns the cached http.Response for req if present, and nil -// otherwise. -func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { - cachedVal, ok := c.Get(cacheKey(req)) - if !ok { - return - } - - b := bytes.NewBuffer(cachedVal) - return http.ReadResponse(bufio.NewReader(b), req) -} - -// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. -type MemoryCache struct { - mu sync.RWMutex - items map[string][]byte -} - -// Get returns the []byte representation of the response and true if present, false if not -func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { - c.mu.RLock() - resp, ok = c.items[key] - c.mu.RUnlock() - return resp, ok -} - -// Set saves response resp to the cache with key -func (c *MemoryCache) Set(key string, resp []byte) { - c.mu.Lock() - c.items[key] = resp - c.mu.Unlock() -} - -// Delete removes key from the cache -func (c *MemoryCache) Delete(key string) { - c.mu.Lock() - delete(c.items, key) - c.mu.Unlock() -} - -// NewMemoryCache returns a new Cache that will store items in an in-memory map -func NewMemoryCache() *MemoryCache { - c := &MemoryCache{items: map[string][]byte{}} - return c -} - -// Transport is an implementation of http.RoundTripper that will return values from a cache -// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) -// to repeated requests allowing servers to return 304 / Not Modified -type Transport struct { - // The RoundTripper interface actually used to make requests - // If nil, http.DefaultTransport is used - Transport http.RoundTripper - Cache Cache - // If true, responses returned from the cache will be given an extra header, X-From-Cache - MarkCachedResponses bool -} - -// NewTransport returns a new Transport with the -// provided Cache implementation and MarkCachedResponses set to true -func NewTransport(c Cache) *Transport { - return &Transport{Cache: c, MarkCachedResponses: true} -} - -// Client returns an *http.Client that caches responses. -func (t *Transport) Client() *http.Client { - return &http.Client{Transport: t} -} - -// varyMatches will return false unless all of the cached values for the headers listed in Vary -// match the new request -func varyMatches(cachedResp *http.Response, req *http.Request) bool { - for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { - header = http.CanonicalHeaderKey(header) - if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { - return false - } - } - return true -} - -// RoundTrip takes a Request and returns a Response -// -// If there is a fresh Response already in cache, then it will be returned without connecting to -// the server. -// -// If there is a stale Response, then any validators it contains will be set on the new request -// to give the server a chance to respond with NotModified. If this happens, then the cached Response -// will be returned. 
-func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - cacheKey := cacheKey(req) - cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" - var cachedResp *http.Response - if cacheable { - cachedResp, err = CachedResponse(t.Cache, req) - } else { - // Need to invalidate an existing value - t.Cache.Delete(cacheKey) - } - - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - if cacheable && cachedResp != nil && err == nil { - if t.MarkCachedResponses { - cachedResp.Header.Set(XFromCache, "1") - } - - if varyMatches(cachedResp, req) { - // Can only use cached value if the new request doesn't Vary significantly - freshness := getFreshness(cachedResp.Header, req.Header) - if freshness == fresh { - return cachedResp, nil - } - - if freshness == stale { - var req2 *http.Request - // Add validators if caller hasn't already done so - etag := cachedResp.Header.Get("etag") - if etag != "" && req.Header.Get("etag") == "" { - req2 = cloneRequest(req) - req2.Header.Set("if-none-match", etag) - } - lastModified := cachedResp.Header.Get("last-modified") - if lastModified != "" && req.Header.Get("last-modified") == "" { - if req2 == nil { - req2 = cloneRequest(req) - } - req2.Header.Set("if-modified-since", lastModified) - } - if req2 != nil { - req = req2 - } - } - } - - resp, err = transport.RoundTrip(req) - if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { - // Replace the 304 response with the one from cache, but update with some new headers - endToEndHeaders := getEndToEndHeaders(resp.Header) - for _, header := range endToEndHeaders { - cachedResp.Header[header] = resp.Header[header] - } - resp = cachedResp - } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && - req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { - // In case of transport failure and stale-if-error activated, returns cached content - // when available - return cachedResp, nil - } else { - if err != nil || resp.StatusCode != http.StatusOK { - t.Cache.Delete(cacheKey) - } - if err != nil { - return nil, err - } - } - } else { - reqCacheControl := parseCacheControl(req.Header) - if _, ok := reqCacheControl["only-if-cached"]; ok { - resp = newGatewayTimeoutResponse(req) - } else { - resp, err = transport.RoundTrip(req) - if err != nil { - return nil, err - } - } - } - - if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { - for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { - varyKey = http.CanonicalHeaderKey(varyKey) - fakeHeader := "X-Varied-" + varyKey - reqValue := req.Header.Get(varyKey) - if reqValue != "" { - resp.Header.Set(fakeHeader, reqValue) - } - } - switch req.Method { - case "GET": - // Delay caching until EOF is reached. - resp.Body = &cachingReadCloser{ - R: resp.Body, - OnEOF: func(r io.Reader) { - resp := *resp - resp.Body = ioutil.NopCloser(r) - respBytes, err := httputil.DumpResponse(&resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - }, - } - default: - respBytes, err := httputil.DumpResponse(resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - } - } else { - t.Cache.Delete(cacheKey) - } - return resp, nil -} - -// ErrNoDateHeader indicates that the HTTP headers contained no Date header. -var ErrNoDateHeader = errors.New("no Date header") - -// Date parses and returns the value of the Date header. 
-func Date(respHeaders http.Header) (date time.Time, err error) { - dateHeader := respHeaders.Get("date") - if dateHeader == "" { - err = ErrNoDateHeader - return - } - - return time.Parse(time.RFC1123, dateHeader) -} - -type realClock struct{} - -func (c *realClock) since(d time.Time) time.Duration { - return time.Since(d) -} - -type timer interface { - since(d time.Time) time.Duration -} - -var clock timer = &realClock{} - -// getFreshness will return one of fresh/stale/transparent based on the cache-control -// values of the request and the response -// -// fresh indicates the response can be returned -// stale indicates that the response needs validating before it is returned -// transparent indicates the response should not be used to fulfil the request -// -// Because this is only a private cache, 'public' and 'private' in cache-control aren't -// signficant. Similarly, smax-age isn't used. -func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - if _, ok := reqCacheControl["no-cache"]; ok { - return transparent - } - if _, ok := respCacheControl["no-cache"]; ok { - return stale - } - if _, ok := reqCacheControl["only-if-cached"]; ok { - return fresh - } - - date, err := Date(respHeaders) - if err != nil { - return stale - } - currentAge := clock.since(date) - - var lifetime time.Duration - var zeroDuration time.Duration - - // If a response includes both an Expires header and a max-age directive, - // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. - if maxAge, ok := respCacheControl["max-age"]; ok { - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } else { - expiresHeader := respHeaders.Get("Expires") - if expiresHeader != "" { - expires, err := time.Parse(time.RFC1123, expiresHeader) - if err != nil { - lifetime = zeroDuration - } else { - lifetime = expires.Sub(date) - } - } - } - - if maxAge, ok := reqCacheControl["max-age"]; ok { - // the client is willing to accept a response whose age is no greater than the specified time in seconds - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } - if minfresh, ok := reqCacheControl["min-fresh"]; ok { - // the client wants a response that will still be fresh for at least the specified number of seconds. - minfreshDuration, err := time.ParseDuration(minfresh + "s") - if err == nil { - currentAge = time.Duration(currentAge + minfreshDuration) - } - } - - if maxstale, ok := reqCacheControl["max-stale"]; ok { - // Indicates that the client is willing to accept a response that has exceeded its expiration time. - // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded - // its expiration time by no more than the specified number of seconds. - // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. - // - // Responses served only because of a max-stale value are supposed to have a Warning header added to them, - // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different - // return-value available here. 
- if maxstale == "" { - return fresh - } - maxstaleDuration, err := time.ParseDuration(maxstale + "s") - if err == nil { - currentAge = time.Duration(currentAge - maxstaleDuration) - } - } - - if lifetime > currentAge { - return fresh - } - - return stale -} - -// Returns true if either the request or the response includes the stale-if-error -// cache control extension: https://tools.ietf.org/html/rfc5861 -func canStaleOnError(respHeaders, reqHeaders http.Header) bool { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - - var err error - lifetime := time.Duration(-1) - - if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - - if lifetime >= 0 { - date, err := Date(respHeaders) - if err != nil { - return false - } - currentAge := clock.since(date) - if lifetime > currentAge { - return true - } - } - - return false -} - -func getEndToEndHeaders(respHeaders http.Header) []string { - // These headers are always hop-by-hop - hopByHopHeaders := map[string]struct{}{ - "Connection": struct{}{}, - "Keep-Alive": struct{}{}, - "Proxy-Authenticate": struct{}{}, - "Proxy-Authorization": struct{}{}, - "Te": struct{}{}, - "Trailers": struct{}{}, - "Transfer-Encoding": struct{}{}, - "Upgrade": struct{}{}, - } - - for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { - // any header listed in connection, if present, is also considered hop-by-hop - if strings.Trim(extra, " ") != "" { - hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} - } - } - endToEndHeaders := []string{} - for respHeader, _ := range respHeaders { - if _, ok := hopByHopHeaders[respHeader]; !ok { - endToEndHeaders = append(endToEndHeaders, respHeader) - } - } - return endToEndHeaders -} - -func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { - if _, ok := respCacheControl["no-store"]; ok { - return false - } - if _, ok := reqCacheControl["no-store"]; ok { - return false - } - return true -} - -func newGatewayTimeoutResponse(req *http.Request) *http.Response { - var braw bytes.Buffer - braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") - resp, err := http.ReadResponse(bufio.NewReader(&braw), req) - if err != nil { - panic(err) - } - return resp -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
-// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -type cacheControl map[string]string - -func parseCacheControl(headers http.Header) cacheControl { - cc := cacheControl{} - ccHeader := headers.Get("Cache-Control") - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - keyval := strings.Split(part, "=") - cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") - } else { - cc[part] = "" - } - } - return cc -} - -// headerAllCommaSepValues returns all comma-separated values (each -// with whitespace trimmed) for header name in headers. According to -// Section 4.2 of the HTTP/1.1 spec -// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), -// values from multiple occurrences of a header should be concatenated, if -// the header's value is a comma-separated list. -func headerAllCommaSepValues(headers http.Header, name string) []string { - var vals []string - for _, val := range headers[http.CanonicalHeaderKey(name)] { - fields := strings.Split(val, ",") - for i, f := range fields { - fields[i] = strings.TrimSpace(f) - } - vals = append(vals, fields...) - } - return vals -} - -// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF -// handler with a full copy of the content read from R when EOF is -// reached. -type cachingReadCloser struct { - // Underlying ReadCloser. - R io.ReadCloser - // OnEOF is called with a copy of the content of R when EOF is reached. - OnEOF func(io.Reader) - - buf bytes.Buffer // buf stores a copy of the content of R. -} - -// Read reads the next len(p) bytes from R or until R is drained. The -// return value n is the number of bytes read. If R has no data to -// return, err is io.EOF and OnEOF is called with a full copy of what -// has been read so far. -func (r *cachingReadCloser) Read(p []byte) (n int, err error) { - n, err = r.R.Read(p) - r.buf.Write(p[:n]) - if err == io.EOF { - r.OnEOF(bytes.NewReader(r.buf.Bytes())) - } - return n, err -} - -func (r *cachingReadCloser) Close() error { - return r.R.Close() -} - -// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation -func NewMemoryCacheTransport() *Transport { - c := NewMemoryCache() - t := NewTransport(c) - return t -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt deleted file mode 100644 index 3645162..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Gengo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Gengo, Inc. 
nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel deleted file mode 100644 index 76cafe6..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel +++ /dev/null @@ -1,22 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -package(default_visibility = ["//visibility:public"]) - -proto_library( - name = "internal_proto", - srcs = ["stream_chunk.proto"], - deps = ["@com_google_protobuf//:any_proto"], -) - -go_proto_library( - name = "internal_go_proto", - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", - proto = ":internal_proto", -) - -go_library( - name = "go_default_library", - embed = [":internal_go_proto"], - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", -) diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go deleted file mode 100644 index 8858f06..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: internal/stream_chunk.proto - -package internal - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import any "github.com/golang/protobuf/ptypes/any" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// StreamError is a response type which is returned when -// streaming rpc returns an error. 
-type StreamError struct { - GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` - HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` - Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StreamError) Reset() { *m = StreamError{} } -func (m *StreamError) String() string { return proto.CompactTextString(m) } -func (*StreamError) ProtoMessage() {} -func (*StreamError) Descriptor() ([]byte, []int) { - return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0} -} -func (m *StreamError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamError.Unmarshal(m, b) -} -func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) -} -func (dst *StreamError) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamError.Merge(dst, src) -} -func (m *StreamError) XXX_Size() int { - return xxx_messageInfo_StreamError.Size(m) -} -func (m *StreamError) XXX_DiscardUnknown() { - xxx_messageInfo_StreamError.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamError proto.InternalMessageInfo - -func (m *StreamError) GetGrpcCode() int32 { - if m != nil { - return m.GrpcCode - } - return 0 -} - -func (m *StreamError) GetHttpCode() int32 { - if m != nil { - return m.HttpCode - } - return 0 -} - -func (m *StreamError) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *StreamError) GetHttpStatus() string { - if m != nil { - return m.HttpStatus - } - return "" -} - -func (m *StreamError) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -func init() { - proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") -} - -func init() { - proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7) -} - -var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{ - // 223 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30, - 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23, - 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78, - 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce, - 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2, - 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a, - 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f, - 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54, - 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7, - 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5, - 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9, - 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 
0x7a, 0x59, 0xac, - 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18, - 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00, -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto deleted file mode 100644 index 55f42ce..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; -package grpc.gateway.runtime; -option go_package = "internal"; - -import "google/protobuf/any.proto"; - -// StreamError is a response type which is returned when -// streaming rpc returns an error. -message StreamError { - int32 grpc_code = 1; - int32 http_code = 2; - string message = 3; - string http_status = 4; - repeated google.protobuf.Any details = 5; -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel deleted file mode 100644 index c99f83e..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ /dev/null @@ -1,80 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "fieldmask.go", - "handler.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshal_proto.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "proto_errors.go", - "query.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", - deps = [ - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//grpclog:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "context_test.go", - "errors_test.go", - "fieldmask_test.go", - "handler_test.go", - "marshal_json_test.go", - "marshal_jsonpb_test.go", - "marshal_proto_test.go", - "marshaler_registry_test.go", - "mux_test.go", - "pattern_test.go", - "query_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//examples/proto/examplepb:go_default_library", - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/rpc:errdetails_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - 
"@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//:go_default_library", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go deleted file mode 100644 index 896057e..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ /dev/null @@ -1,210 +0,0 @@ -package runtime - -import ( - "context" - "encoding/base64" - "fmt" - "net" - "net/http" - "net/textproto" - "strconv" - "strings" - "time" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// MetadataHeaderPrefix is the http prefix that represents custom metadata -// parameters to or from a gRPC call. -const MetadataHeaderPrefix = "Grpc-Metadata-" - -// MetadataPrefix is prepended to permanent HTTP header keys (as specified -// by the IANA) when added to the gRPC context. -const MetadataPrefix = "grpcgateway-" - -// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to -// HTTP headers in a response handled by grpc-gateway -const MetadataTrailerPrefix = "Grpc-Trailer-" - -const metadataGrpcTimeout = "Grpc-Timeout" -const metadataHeaderBinarySuffix = "-Bin" - -const xForwardedFor = "X-Forwarded-For" -const xForwardedHost = "X-Forwarded-Host" - -var ( - // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound - // header isn't present. If the value is 0 the sent `context` will not have a timeout. - DefaultContextTimeout = 0 * time.Second -) - -func decodeBinHeader(v string) ([]byte, error) { - if len(v)%4 == 0 { - // Input was padded, or padding was not necessary. - return base64.StdEncoding.DecodeString(v) - } - return base64.RawStdEncoding.DecodeString(v) -} - -/* -AnnotateContext adds context information such as metadata from the request. - -At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", -except that the forwarded destination is not another HTTP service but rather -a gRPC service. -*/ -func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - var pairs []string - timeout := DefaultContextTimeout - if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { - var err error - timeout, err = timeoutDecode(tm) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) - } - } - - for key, vals := range req.Header { - for _, val := range vals { - key = textproto.CanonicalMIMEHeaderKey(key) - // For backwards-compatibility, pass through 'authorization' header with no prefix. - if key == "Authorization" { - pairs = append(pairs, "authorization", val) - } - if h, ok := mux.incomingHeaderMatcher(key); ok { - // Handles "-bin" metadata in grpc, since grpc will do another base64 - // encode before sending to server, we need to decode it first. 
- if strings.HasSuffix(key, metadataHeaderBinarySuffix) { - b, err := decodeBinHeader(val) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) - } - - val = string(b) - } - pairs = append(pairs, h, val) - } - } - } - if host := req.Header.Get(xForwardedHost); host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), host) - } else if req.Host != "" { - pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) - } - - if addr := req.RemoteAddr; addr != "" { - if remoteIP, _, err := net.SplitHostPort(addr); err == nil { - if fwd := req.Header.Get(xForwardedFor); fwd == "" { - pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) - } else { - pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) - } - } else { - grpclog.Infof("invalid remote addr: %s", addr) - } - } - - if timeout != 0 { - ctx, _ = context.WithTimeout(ctx, timeout) - } - if len(pairs) == 0 { - return ctx, nil - } - md := metadata.Pairs(pairs...) - for _, mda := range mux.metadataAnnotators { - md = metadata.Join(md, mda(ctx, req)) - } - return metadata.NewOutgoingContext(ctx, md), nil -} - -// ServerMetadata consists of metadata sent from gRPC server. -type ServerMetadata struct { - HeaderMD metadata.MD - TrailerMD metadata.MD -} - -type serverMetadataKey struct{} - -// NewServerMetadataContext creates a new context with ServerMetadata -func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { - return context.WithValue(ctx, serverMetadataKey{}, md) -} - -// ServerMetadataFromContext returns the ServerMetadata in ctx -func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { - md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) - return -} - -func timeoutDecode(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("timeout string is too short: %q", s) - } - d, ok := timeoutUnitToDuration(s[size-1]) - if !ok { - return 0, fmt.Errorf("timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - return d * time.Duration(t), nil -} - -func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { - switch u { - case 'H': - return time.Hour, true - case 'M': - return time.Minute, true - case 'S': - return time.Second, true - case 'm': - return time.Millisecond, true - case 'u': - return time.Microsecond, true - case 'n': - return time.Nanosecond, true - default: - } - return -} - -// isPermanentHTTPHeader checks whether hdr belongs to the list of -// permenant request headers maintained by IANA. 
-// http://www.iana.org/assignments/message-headers/message-headers.xml -func isPermanentHTTPHeader(hdr string) bool { - switch hdr { - case - "Accept", - "Accept-Charset", - "Accept-Language", - "Accept-Ranges", - "Authorization", - "Cache-Control", - "Content-Type", - "Cookie", - "Date", - "Expect", - "From", - "Host", - "If-Match", - "If-Modified-Since", - "If-None-Match", - "If-Schedule-Tag-Match", - "If-Unmodified-Since", - "Max-Forwards", - "Origin", - "Pragma", - "Referer", - "User-Agent", - "Via", - "Warning": - return true - } - return false -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go deleted file mode 100644 index a5b3bd6..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go +++ /dev/null @@ -1,312 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "strconv" - "strings" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/golang/protobuf/ptypes/wrappers" -) - -// String just returns the given string. -// It is just for compatibility to other types. -func String(val string) (string, error) { - return val, nil -} - -// StringSlice converts 'val' where individual strings are separated by -// 'sep' into a string slice. -func StringSlice(val, sep string) ([]string, error) { - return strings.Split(val, sep), nil -} - -// Bool converts the given string representation of a boolean value into bool. -func Bool(val string) (bool, error) { - return strconv.ParseBool(val) -} - -// BoolSlice converts 'val' where individual booleans are separated by -// 'sep' into a bool slice. -func BoolSlice(val, sep string) ([]bool, error) { - s := strings.Split(val, sep) - values := make([]bool, len(s)) - for i, v := range s { - value, err := Bool(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Float64 converts the given string representation into representation of a floating point number into float64. -func Float64(val string) (float64, error) { - return strconv.ParseFloat(val, 64) -} - -// Float64Slice converts 'val' where individual floating point numbers are separated by -// 'sep' into a float64 slice. -func Float64Slice(val, sep string) ([]float64, error) { - s := strings.Split(val, sep) - values := make([]float64, len(s)) - for i, v := range s { - value, err := Float64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Float32 converts the given string representation of a floating point number into float32. -func Float32(val string) (float32, error) { - f, err := strconv.ParseFloat(val, 32) - if err != nil { - return 0, err - } - return float32(f), nil -} - -// Float32Slice converts 'val' where individual floating point numbers are separated by -// 'sep' into a float32 slice. -func Float32Slice(val, sep string) ([]float32, error) { - s := strings.Split(val, sep) - values := make([]float32, len(s)) - for i, v := range s { - value, err := Float32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Int64 converts the given string representation of an integer into int64. -func Int64(val string) (int64, error) { - return strconv.ParseInt(val, 0, 64) -} - -// Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. 
-func Int64Slice(val, sep string) ([]int64, error) { - s := strings.Split(val, sep) - values := make([]int64, len(s)) - for i, v := range s { - value, err := Int64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Int32 converts the given string representation of an integer into int32. -func Int32(val string) (int32, error) { - i, err := strconv.ParseInt(val, 0, 32) - if err != nil { - return 0, err - } - return int32(i), nil -} - -// Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. -func Int32Slice(val, sep string) ([]int32, error) { - s := strings.Split(val, sep) - values := make([]int32, len(s)) - for i, v := range s { - value, err := Int32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Uint64 converts the given string representation of an integer into uint64. -func Uint64(val string) (uint64, error) { - return strconv.ParseUint(val, 0, 64) -} - -// Uint64Slice converts 'val' where individual integers are separated by -// 'sep' into a uint64 slice. -func Uint64Slice(val, sep string) ([]uint64, error) { - s := strings.Split(val, sep) - values := make([]uint64, len(s)) - for i, v := range s { - value, err := Uint64(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Uint32 converts the given string representation of an integer into uint32. -func Uint32(val string) (uint32, error) { - i, err := strconv.ParseUint(val, 0, 32) - if err != nil { - return 0, err - } - return uint32(i), nil -} - -// Uint32Slice converts 'val' where individual integers are separated by -// 'sep' into a uint32 slice. -func Uint32Slice(val, sep string) ([]uint32, error) { - s := strings.Split(val, sep) - values := make([]uint32, len(s)) - for i, v := range s { - value, err := Uint32(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Bytes converts the given string representation of a byte sequence into a slice of bytes -// A bytes sequence is encoded in URL-safe base64 without padding -func Bytes(val string) ([]byte, error) { - b, err := base64.StdEncoding.DecodeString(val) - if err != nil { - b, err = base64.URLEncoding.DecodeString(val) - if err != nil { - return nil, err - } - } - return b, nil -} - -// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. -func BytesSlice(val, sep string) ([][]byte, error) { - s := strings.Split(val, sep) - values := make([][]byte, len(s)) - for i, v := range s { - value, err := Bytes(v) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. -func Timestamp(val string) (*timestamp.Timestamp, error) { - var r *timestamp.Timestamp - err := jsonpb.UnmarshalString(val, r) - return r, err -} - -// Duration converts the given string into a timestamp.Duration. -func Duration(val string) (*duration.Duration, error) { - var r *duration.Duration - err := jsonpb.UnmarshalString(val, r) - return r, err -} - -// Enum converts the given string into an int32 that should be type casted into the -// correct enum proto type. 
-func Enum(val string, enumValMap map[string]int32) (int32, error) { - e, ok := enumValMap[val] - if ok { - return e, nil - } - - i, err := Int32(val) - if err != nil { - return 0, fmt.Errorf("%s is not valid", val) - } - for _, v := range enumValMap { - if v == i { - return i, nil - } - } - return 0, fmt.Errorf("%s is not valid", val) -} - -// EnumSlice converts 'val' where individual enums are separated by 'sep' -// into a int32 slice. Each individual int32 should be type casted into the -// correct enum proto type. -func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { - s := strings.Split(val, sep) - values := make([]int32, len(s)) - for i, v := range s { - value, err := Enum(v, enumValMap) - if err != nil { - return values, err - } - values[i] = value - } - return values, nil -} - -/* - Support fot google.protobuf.wrappers on top of primitive types -*/ - -// StringValue well-known type support as wrapper around string type -func StringValue(val string) (*wrappers.StringValue, error) { - return &wrappers.StringValue{Value: val}, nil -} - -// FloatValue well-known type support as wrapper around float32 type -func FloatValue(val string) (*wrappers.FloatValue, error) { - parsedVal, err := Float32(val) - return &wrappers.FloatValue{Value: parsedVal}, err -} - -// DoubleValue well-known type support as wrapper around float64 type -func DoubleValue(val string) (*wrappers.DoubleValue, error) { - parsedVal, err := Float64(val) - return &wrappers.DoubleValue{Value: parsedVal}, err -} - -// BoolValue well-known type support as wrapper around bool type -func BoolValue(val string) (*wrappers.BoolValue, error) { - parsedVal, err := Bool(val) - return &wrappers.BoolValue{Value: parsedVal}, err -} - -// Int32Value well-known type support as wrapper around int32 type -func Int32Value(val string) (*wrappers.Int32Value, error) { - parsedVal, err := Int32(val) - return &wrappers.Int32Value{Value: parsedVal}, err -} - -// UInt32Value well-known type support as wrapper around uint32 type -func UInt32Value(val string) (*wrappers.UInt32Value, error) { - parsedVal, err := Uint32(val) - return &wrappers.UInt32Value{Value: parsedVal}, err -} - -// Int64Value well-known type support as wrapper around int64 type -func Int64Value(val string) (*wrappers.Int64Value, error) { - parsedVal, err := Int64(val) - return &wrappers.Int64Value{Value: parsedVal}, err -} - -// UInt64Value well-known type support as wrapper around uint64 type -func UInt64Value(val string) (*wrappers.UInt64Value, error) { - parsedVal, err := Uint64(val) - return &wrappers.UInt64Value{Value: parsedVal}, err -} - -// BytesValue well-known type support as wrapper around bytes[] type -func BytesValue(val string) (*wrappers.BytesValue, error) { - parsedVal, err := Bytes(val) - return &wrappers.BytesValue{Value: parsedVal}, err -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go deleted file mode 100644 index b6e5ddf..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -/* -Package runtime contains runtime helper functions used by -servers which protoc-gen-grpc-gateway generates. 
-*/ -package runtime diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go deleted file mode 100644 index 41d54ef..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ /dev/null @@ -1,145 +0,0 @@ -package runtime - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. -// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -func HTTPStatusFromCode(code codes.Code) int { - switch code { - case codes.OK: - return http.StatusOK - case codes.Canceled: - return http.StatusRequestTimeout - case codes.Unknown: - return http.StatusInternalServerError - case codes.InvalidArgument: - return http.StatusBadRequest - case codes.DeadlineExceeded: - return http.StatusGatewayTimeout - case codes.NotFound: - return http.StatusNotFound - case codes.AlreadyExists: - return http.StatusConflict - case codes.PermissionDenied: - return http.StatusForbidden - case codes.Unauthenticated: - return http.StatusUnauthorized - case codes.ResourceExhausted: - return http.StatusTooManyRequests - case codes.FailedPrecondition: - return http.StatusPreconditionFailed - case codes.Aborted: - return http.StatusConflict - case codes.OutOfRange: - return http.StatusBadRequest - case codes.Unimplemented: - return http.StatusNotImplemented - case codes.Internal: - return http.StatusInternalServerError - case codes.Unavailable: - return http.StatusServiceUnavailable - case codes.DataLoss: - return http.StatusInternalServerError - } - - grpclog.Infof("Unknown gRPC error code: %v", code) - return http.StatusInternalServerError -} - -var ( - // HTTPError replies to the request with the error. - // You can set a custom function to this variable to customize error format. - HTTPError = DefaultHTTPError - // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest - OtherErrorHandler = DefaultOtherErrorHandler -) - -type errorBody struct { - Error string `protobuf:"bytes,1,name=error" json:"error"` - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. - Message string `protobuf:"bytes,1,name=message" json:"message"` - Code int32 `protobuf:"varint,2,name=code" json:"code"` - Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` -} - -// Make this also conform to proto.Message for builtin JSONPb Marshaler -func (e *errorBody) Reset() { *e = errorBody{} } -func (e *errorBody) String() string { return proto.CompactTextString(e) } -func (*errorBody) ProtoMessage() {} - -// DefaultHTTPError is the default implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a JSON object, -// which contains a member whose key is "error" and whose value is err.Error(). 
-func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - const fallback = `{"error": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - pb := s.Proto() - contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - body := &errorBody{ - Error: s.Message(), - Message: s.Message(), - Code: int32(s.Code()), - Details: s.Proto().GetDetails(), - } - - buf, merr := marshaler.Marshal(body) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", body, merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. -// It simply writes a string representation of the given error into "w". -func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { - http.Error(w, msg, code) -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go deleted file mode 100644 index e1cf7a9..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go +++ /dev/null @@ -1,70 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" - "strings" - - "github.com/golang/protobuf/protoc-gen-go/generator" - "google.golang.org/genproto/protobuf/field_mask" -) - -// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. 
-func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) { - fm := &field_mask.FieldMask{} - var root interface{} - if err := json.NewDecoder(r).Decode(&root); err != nil { - if err == io.EOF { - return fm, nil - } - return nil, err - } - - queue := []fieldMaskPathItem{{node: root}} - for len(queue) > 0 { - // dequeue an item - item := queue[0] - queue = queue[1:] - - if m, ok := item.node.(map[string]interface{}); ok { - // if the item is an object, then enqueue all of its children - for k, v := range m { - queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v}) - } - } else if len(item.path) > 0 { - // otherwise, it's a leaf node so print its path - fm.Paths = append(fm.Paths, strings.Join(item.path, ".")) - } - } - - return fm, nil -} - -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask -type fieldMaskPathItem struct { - // the list of prior fields leading up to node - path []string - - // a generic decoded json object the current item to inspect for further path extraction - node interface{} -} - -// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic -// that's used for naming protobuf fields in Go. -func CamelCaseFieldMask(mask *field_mask.FieldMask) { - if mask == nil || mask.Paths == nil { - return - } - - var newPaths []string - for _, path := range mask.Paths { - lowerCasedParts := strings.Split(path, ".") - var camelCasedParts []string - for _, part := range lowerCasedParts { - camelCasedParts = append(camelCasedParts, generator.CamelCase(part)) - } - newPaths = append(newPaths, strings.Join(camelCasedParts, ".")) - } - - mask.Paths = newPaths -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go deleted file mode 100644 index 1fc63f7..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ /dev/null @@ -1,215 +0,0 @@ -package runtime - -import ( - "fmt" - "io" - "net/http" - "net/textproto" - - "context" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/internal" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// ForwardResponseStream forwards the stream from gRPC server to REST client. 
-func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - f, ok := w.(http.Flusher) - if !ok { - grpclog.Infof("Flush not supported in %T", w) - http.Error(w, "unexpected type of web server", http.StatusInternalServerError) - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - http.Error(w, "unexpected error", http.StatusInternalServerError) - return - } - handleForwardResponseServerMetadata(w, mux, md) - - w.Header().Set("Transfer-Encoding", "chunked") - w.Header().Set("Content-Type", marshaler.ContentType()) - if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - - var delimiter []byte - if d, ok := marshaler.(Delimited); ok { - delimiter = d.Delimiter() - } else { - delimiter = []byte("\n") - } - - var wroteHeader bool - for { - resp, err := recv() - if err == io.EOF { - return - } - if err != nil { - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - - buf, err := marshaler.Marshal(streamChunk(resp, nil)) - if err != nil { - grpclog.Infof("Failed to marshal response chunk: %v", err) - handleForwardResponseStreamError(wroteHeader, marshaler, w, err) - return - } - if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to send response chunk: %v", err) - return - } - wroteHeader = true - if _, err = w.Write(delimiter); err != nil { - grpclog.Infof("Failed to send delimiter chunk: %v", err) - return - } - f.Flush() - } -} - -func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { - for k, vs := range md.HeaderMD { - if h, ok := mux.outgoingHeaderMatcher(k); ok { - for _, v := range vs { - w.Header().Add(h, v) - } - } - } -} - -func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { - for k := range md.TrailerMD { - tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) - w.Header().Add("Trailer", tKey) - } -} - -func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { - for k, vs := range md.TrailerMD { - tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) - for _, v := range vs { - w.Header().Add(tKey, v) - } - } -} - -// responseBody interface contains method for getting field for marshaling to the response body -// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` -type responseBody interface { - XXX_ResponseBody() interface{} -} - -// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
-func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - contentType = httpBodyMarshaler.ContentTypeFromMessage(resp) - } - w.Header().Set("Content-Type", contentType) - - if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - var buf []byte - var err error - if rb, ok := resp.(responseBody); ok { - buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) - } else { - buf, err = marshaler.Marshal(resp) - } - if err != nil { - grpclog.Infof("Marshal error: %v", err) - HTTPError(ctx, mux, marshaler, w, req, err) - return - } - - if _, err = w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { - if len(opts) == 0 { - return nil - } - for _, opt := range opts { - if err := opt(ctx, w, resp); err != nil { - grpclog.Infof("Error handling ForwardResponseOptions: %v", err) - return err - } - } - return nil -} - -func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) { - buf, merr := marshaler.Marshal(streamChunk(nil, err)) - if merr != nil { - grpclog.Infof("Failed to marshal an error: %v", merr) - return - } - if !wroteHeader { - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - w.WriteHeader(HTTPStatusFromCode(s.Code())) - } - if _, werr := w.Write(buf); werr != nil { - grpclog.Infof("Failed to notify error to client: %v", werr) - return - } -} - -func streamChunk(result proto.Message, err error) map[string]proto.Message { - if err != nil { - grpcCode := codes.Unknown - grpcMessage := err.Error() - var grpcDetails []*any.Any - if s, ok := status.FromError(err); ok { - grpcCode = s.Code() - grpcMessage = s.Message() - grpcDetails = s.Proto().GetDetails() - } - httpCode := HTTPStatusFromCode(grpcCode) - return map[string]proto.Message{ - "error": &internal.StreamError{ - GrpcCode: int32(grpcCode), - HttpCode: int32(httpCode), - Message: grpcMessage, - HttpStatus: http.StatusText(httpCode), - Details: grpcDetails, - }, - } - } - if result == nil { - return streamChunk(nil, fmt.Errorf("empty response")) - } - return map[string]proto.Message{"result": result} -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go deleted file mode 100644 index f55285b..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go +++ /dev/null @@ -1,43 +0,0 @@ -package runtime - -import ( - 
"google.golang.org/genproto/googleapis/api/httpbody" -) - -// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler -func SetHTTPBodyMarshaler(serveMux *ServeMux) { - serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ - Marshaler: &JSONPb{OrigName: true}, - } -} - -// HTTPBodyMarshaler is a Marshaler which supports marshaling of a -// google.api.HttpBody message as the full response body if it is -// the actual message used as the response. If not, then this will -// simply fallback to the Marshaler specified as its default Marshaler. -type HTTPBodyMarshaler struct { - Marshaler -} - -// ContentType implementation to keep backwards compatability with marshal interface -func (h *HTTPBodyMarshaler) ContentType() string { - return h.ContentTypeFromMessage(nil) -} - -// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns -// its specified content type otherwise fall back to the default Marshaler. -func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { - if httpBody, ok := v.(*httpbody.HttpBody); ok { - return httpBody.GetContentType() - } - return h.Marshaler.ContentType() -} - -// Marshal marshals "v" by returning the body bytes if v is a -// google.api.HttpBody message, otherwise it falls back to the default Marshaler. -func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { - if httpBody, ok := v.(*httpbody.HttpBody); ok { - return httpBody.Data, nil - } - return h.Marshaler.Marshal(v) -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go deleted file mode 100644 index f9d3a58..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go +++ /dev/null @@ -1,45 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON -// with the standard "encoding/json" package of Golang. -// Although it is generally faster for simple proto messages than JSONPb, -// it does not support advanced features of protobuf, e.g. map, oneof, .... -// -// The NewEncoder and NewDecoder types return *json.Encoder and -// *json.Decoder respectively. -type JSONBuiltin struct{} - -// ContentType always Returns "application/json". -func (*JSONBuiltin) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON -func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// Unmarshal unmarshals JSON data into "v". -func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { - return json.NewDecoder(r) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". -func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { - return json.NewEncoder(w) -} - -// Delimiter for newline encoded JSON streams. 
-func (j *JSONBuiltin) Delimiter() []byte { - return []byte("\n") -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go deleted file mode 100644 index 3530ddd..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go +++ /dev/null @@ -1,242 +0,0 @@ -package runtime - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "reflect" - - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" -) - -// JSONPb is a Marshaler which marshals/unmarshals into/from JSON -// with the "github.com/golang/protobuf/jsonpb". -// It supports fully functionality of protobuf unlike JSONBuiltin. -// -// The NewDecoder method returns a DecoderWrapper, so the underlying -// *json.Decoder methods can be used. -type JSONPb jsonpb.Marshaler - -// ContentType always returns "application/json". -func (*JSONPb) ContentType() string { - return "application/json" -} - -// Marshal marshals "v" into JSON. -func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { - if _, ok := v.(proto.Message); !ok { - return j.marshalNonProtoField(v) - } - - var buf bytes.Buffer - if err := j.marshalTo(&buf, v); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - buf, err := j.marshalNonProtoField(v) - if err != nil { - return err - } - _, err = w.Write(buf) - return err - } - return (*jsonpb.Marshaler)(j).Marshal(w, p) -} - -var ( - // protoMessageType is stored to prevent constant lookup of the same type at runtime. - protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() -) - -// marshalNonProto marshals a non-message field of a protobuf message. -// This function does not correctly marshals arbitrary data structure into JSON, -// but it is only capable of marshaling non-message field values of protobuf, -// i.e. primitive types, enums; pointers to primitives or enums; maps from -// integer/string types to primitives/enums/pointers to messages. 
-func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { - if v == nil { - return []byte("null"), nil - } - rv := reflect.ValueOf(v) - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return []byte("null"), nil - } - rv = rv.Elem() - } - - if rv.Kind() == reflect.Slice { - if rv.IsNil() { - if j.EmitDefaults { - return []byte("[]"), nil - } - return []byte("null"), nil - } - - if rv.Type().Elem().Implements(protoMessageType) { - var buf bytes.Buffer - err := buf.WriteByte('[') - if err != nil { - return nil, err - } - for i := 0; i < rv.Len(); i++ { - if i != 0 { - err = buf.WriteByte(',') - if err != nil { - return nil, err - } - } - if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { - return nil, err - } - } - err = buf.WriteByte(']') - if err != nil { - return nil, err - } - - return buf.Bytes(), nil - } - } - - if rv.Kind() == reflect.Map { - m := make(map[string]*json.RawMessage) - for _, k := range rv.MapKeys() { - buf, err := j.Marshal(rv.MapIndex(k).Interface()) - if err != nil { - return nil, err - } - m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) - } - if j.Indent != "" { - return json.MarshalIndent(m, "", j.Indent) - } - return json.Marshal(m) - } - if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { - return json.Marshal(enum.String()) - } - return json.Marshal(rv.Interface()) -} - -// Unmarshal unmarshals JSON "data" into "v" -func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { - return unmarshalJSONPb(data, v) -} - -// NewDecoder returns a Decoder which reads JSON stream from "r". -func (j *JSONPb) NewDecoder(r io.Reader) Decoder { - d := json.NewDecoder(r) - return DecoderWrapper{Decoder: d} -} - -// DecoderWrapper is a wrapper around a *json.Decoder that adds -// support for protos to the Decode method. -type DecoderWrapper struct { - *json.Decoder -} - -// Decode wraps the embedded decoder's Decode method to support -// protos using a jsonpb.Unmarshaler. -func (d DecoderWrapper) Decode(v interface{}) error { - return decodeJSONPb(d.Decoder, v) -} - -// NewEncoder returns an Encoder which writes JSON stream into "w". 
-func (j *JSONPb) NewEncoder(w io.Writer) Encoder { - return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) }) -} - -func unmarshalJSONPb(data []byte, v interface{}) error { - d := json.NewDecoder(bytes.NewReader(data)) - return decodeJSONPb(d, v) -} - -func decodeJSONPb(d *json.Decoder, v interface{}) error { - p, ok := v.(proto.Message) - if !ok { - return decodeNonProtoField(d, v) - } - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, p) -} - -func decodeNonProtoField(d *json.Decoder, v interface{}) error { - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return fmt.Errorf("%T is not a pointer", v) - } - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - if rv.Type().ConvertibleTo(typeProtoMessage) { - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true} - return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) - } - rv = rv.Elem() - } - if rv.Kind() == reflect.Map { - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - conv, ok := convFromType[rv.Type().Key().Kind()] - if !ok { - return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) - } - - m := make(map[string]*json.RawMessage) - if err := d.Decode(&m); err != nil { - return err - } - for k, v := range m { - result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - bk := result[0] - bv := reflect.New(rv.Type().Elem()) - if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { - return err - } - rv.SetMapIndex(bk, bv.Elem()) - } - return nil - } - if _, ok := rv.Interface().(protoEnum); ok { - var repr interface{} - if err := d.Decode(&repr); err != nil { - return err - } - switch repr.(type) { - case string: - // TODO(yugui) Should use proto.StructProperties? - return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) - case float64: - rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) - return nil - default: - return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) - } - } - return d.Decode(v) -} - -type protoEnum interface { - fmt.Stringer - EnumDescriptor() ([]byte, []int) -} - -var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() - -// Delimiter for newline encoded JSON streams. -func (j *JSONPb) Delimiter() []byte { - return []byte("\n") -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go deleted file mode 100644 index f65d1a2..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go +++ /dev/null @@ -1,62 +0,0 @@ -package runtime - -import ( - "io" - - "errors" - "github.com/golang/protobuf/proto" - "io/ioutil" -) - -// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes -type ProtoMarshaller struct{} - -// ContentType always returns "application/octet-stream". 
-func (*ProtoMarshaller) ContentType() string { - return "application/octet-stream" -} - -// Marshal marshals "value" into Proto -func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { - message, ok := value.(proto.Message) - if !ok { - return nil, errors.New("unable to marshal non proto field") - } - return proto.Marshal(message) -} - -// Unmarshal unmarshals proto "data" into "value" -func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { - message, ok := value.(proto.Message) - if !ok { - return errors.New("unable to unmarshal non proto field") - } - return proto.Unmarshal(data, message) -} - -// NewDecoder returns a Decoder which reads proto stream from "reader". -func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { - return DecoderFunc(func(value interface{}) error { - buffer, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - return marshaller.Unmarshal(buffer, value) - }) -} - -// NewEncoder returns an Encoder which writes proto stream into "writer". -func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { - return EncoderFunc(func(value interface{}) error { - buffer, err := marshaller.Marshal(value) - if err != nil { - return err - } - _, err = writer.Write(buffer) - if err != nil { - return err - } - - return nil - }) -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go deleted file mode 100644 index 98fe6e8..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go +++ /dev/null @@ -1,48 +0,0 @@ -package runtime - -import ( - "io" -) - -// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. -type Marshaler interface { - // Marshal marshals "v" into byte sequence. - Marshal(v interface{}) ([]byte, error) - // Unmarshal unmarshals "data" into "v". - // "v" must be a pointer value. - Unmarshal(data []byte, v interface{}) error - // NewDecoder returns a Decoder which reads byte sequence from "r". - NewDecoder(r io.Reader) Decoder - // NewEncoder returns an Encoder which writes bytes sequence into "w". - NewEncoder(w io.Writer) Encoder - // ContentType returns the Content-Type which this marshaler is responsible for. - ContentType() string -} - -// Decoder decodes a byte sequence -type Decoder interface { - Decode(v interface{}) error -} - -// Encoder encodes gRPC payloads / fields into byte sequence. -type Encoder interface { - Encode(v interface{}) error -} - -// DecoderFunc adapts an decoder function into Decoder. -type DecoderFunc func(v interface{}) error - -// Decode delegates invocations to the underlying function itself. -func (f DecoderFunc) Decode(v interface{}) error { return f(v) } - -// EncoderFunc adapts an encoder function into Encoder -type EncoderFunc func(v interface{}) error - -// Encode delegates invocations to the underlying function itself. -func (f EncoderFunc) Encode(v interface{}) error { return f(v) } - -// Delimited defines the streaming delimiter. -type Delimited interface { - // Delimiter returns the record seperator for the stream. 
- Delimiter() []byte -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go deleted file mode 100644 index 5cc53ae..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go +++ /dev/null @@ -1,91 +0,0 @@ -package runtime - -import ( - "errors" - "net/http" -) - -// MIMEWildcard is the fallback MIME type used for requests which do not match -// a registered MIME type. -const MIMEWildcard = "*" - -var ( - acceptHeader = http.CanonicalHeaderKey("Accept") - contentTypeHeader = http.CanonicalHeaderKey("Content-Type") - - defaultMarshaler = &JSONPb{OrigName: true} -) - -// MarshalerForRequest returns the inbound/outbound marshalers for this request. -// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. -// If it isn't set (or the request Content-Type is empty), checks for "*". -// If there are multiple Content-Type headers set, choose the first one that it can -// exactly match in the registry. -// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. -func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { - for _, acceptVal := range r.Header[acceptHeader] { - if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { - outbound = m - break - } - } - - for _, contentTypeVal := range r.Header[contentTypeHeader] { - if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok { - inbound = m - break - } - } - - if inbound == nil { - inbound = mux.marshalers.mimeMap[MIMEWildcard] - } - if outbound == nil { - outbound = inbound - } - - return inbound, outbound -} - -// marshalerRegistry is a mapping from MIME types to Marshalers. -type marshalerRegistry struct { - mimeMap map[string]Marshaler -} - -// add adds a marshaler for a case-sensitive MIME type string ("*" to match any -// MIME type). -func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { - if len(mime) == 0 { - return errors.New("empty MIME type") - } - - m.mimeMap[mime] = marshaler - - return nil -} - -// makeMarshalerMIMERegistry returns a new registry of marshalers. -// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. -// -// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. -// "*" can be used to match any Content-Type. -// This can be attached to a ServerMux with the marshaler option. -func makeMarshalerMIMERegistry() marshalerRegistry { - return marshalerRegistry{ - mimeMap: map[string]Marshaler{ - MIMEWildcard: defaultMarshaler, - }, - } -} - -// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound -// Marshalers to a MIME type in mux. 
-func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { - return func(mux *ServeMux) { - if err := mux.marshalers.add(mime, marshaler); err != nil { - panic(err) - } - } -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go deleted file mode 100644 index ec81e55..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ /dev/null @@ -1,268 +0,0 @@ -package runtime - -import ( - "context" - "fmt" - "net/http" - "net/textproto" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// A HandlerFunc handles a specific pair of path pattern and HTTP method. -type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) - -// ServeMux is a request multiplexer for grpc-gateway. -// It matches http requests to patterns and invokes the corresponding handler. -type ServeMux struct { - // handlers maps HTTP method to a list of handlers. - handlers map[string][]handler - forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error - marshalers marshalerRegistry - incomingHeaderMatcher HeaderMatcherFunc - outgoingHeaderMatcher HeaderMatcherFunc - metadataAnnotators []func(context.Context, *http.Request) metadata.MD - protoErrorHandler ProtoErrorHandlerFunc - disablePathLengthFallback bool -} - -// ServeMuxOption is an option that can be given to a ServeMux on construction. -type ServeMuxOption func(*ServeMux) - -// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. -// -// forwardResponseOption is an option that will be called on the relevant context.Context, -// http.ResponseWriter, and proto.Message before every forwarded response. -// -// The message may be nil in the case where just a header is being sent. -func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) - } -} - -// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. -type HeaderMatcherFunc func(string) (string, bool) - -// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header -// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with -// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. -func DefaultHeaderMatcher(key string) (string, bool) { - key = textproto.CanonicalMIMEHeaderKey(key) - if isPermanentHTTPHeader(key) { - return MetadataPrefix + key, true - } else if strings.HasPrefix(key, MetadataHeaderPrefix) { - return key[len(MetadataHeaderPrefix):], true - } - return "", false -} - -// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. -// -// This matcher will be called with each header in http.Request. If matcher returns true, that header will be -// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. 
-func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.incomingHeaderMatcher = fn - } -} - -// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. -// -// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be -// passed to http response returned from gateway. To transform the header before passing to response, -// matcher should return modified header. -func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { - return func(mux *ServeMux) { - mux.outgoingHeaderMatcher = fn - } -} - -// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used by services that need to read from http.Request and modify gRPC context. A common use case -// is reading token from cookie and adding it in gRPC context. -func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) - } -} - -// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. -// -// This can be used to handle an error as general proto message defined by gRPC. -// The response including body and status is not backward compatible with the default error handler. -// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. -func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.protoErrorHandler = fn - } -} - -// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. -func WithDisablePathLengthFallback() ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.disablePathLengthFallback = true - } -} - -// NewServeMux returns a new ServeMux whose internal mapping is empty. -func NewServeMux(opts ...ServeMuxOption) *ServeMux { - serveMux := &ServeMux{ - handlers: make(map[string][]handler), - forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), - marshalers: makeMarshalerMIMERegistry(), - } - - for _, opt := range opts { - opt(serveMux) - } - - if serveMux.protoErrorHandler != nil { - HTTPError = serveMux.protoErrorHandler - // OtherErrorHandler is no longer used when protoErrorHandler is set. - // Overwritten by a special error handler to return Unknown. - OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { - ctx := context.Background() - _, outboundMarshaler := MarshalerForRequest(serveMux, r) - sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") - serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) - } - } - - if serveMux.incomingHeaderMatcher == nil { - serveMux.incomingHeaderMatcher = DefaultHeaderMatcher - } - - if serveMux.outgoingHeaderMatcher == nil { - serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { - return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true - } - } - - return serveMux -} - -// Handle associates "h" to the pair of HTTP method and path pattern. -func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { - s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) -} - -// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
-func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - - path := r.URL.Path - if !strings.HasPrefix(path, "/") { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) - } - return - } - - components := strings.Split(path[1:], "/") - l := len(components) - var verb string - if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } - return - } else if idx > 0 { - c := components[l-1] - components[l-1], verb = c[:idx], c[idx+1:] - } - - if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { - r.Method = strings.ToUpper(override) - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - } - for _, h := range s.handlers[r.Method] { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - h.h(w, r, pathParams) - return - } - - // lookup other methods to handle fallback from GET to POST and - // to determine if it is MethodNotAllowed or NotFound. - for m, handlers := range s.handlers { - if m == r.Method { - continue - } - for _, h := range handlers { - pathParams, err := h.pat.Match(components, verb) - if err != nil { - continue - } - // X-HTTP-Method-Override is optional. Always allow fallback to POST. - if s.isPathLengthFallback(r) { - if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } - return - } - h.h(w, r, pathParams) - return - } - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) - } - return - } - } - - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } -} - -// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
-func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { - return s.forwardResponseOptions -} - -func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { - return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" -} - -type handler struct { - pat Pattern - h HandlerFunc -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go deleted file mode 100644 index f16a84a..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ /dev/null @@ -1,227 +0,0 @@ -package runtime - -import ( - "errors" - "fmt" - "strings" - - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -var ( - // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. - ErrNotMatch = errors.New("not match to the path pattern") - // ErrInvalidPattern indicates that the given definition of Pattern is not valid. - ErrInvalidPattern = errors.New("invalid pattern") -) - -type op struct { - code utilities.OpCode - operand int -} - -// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. -type Pattern struct { - // ops is a list of operations - ops []op - // pool is a constant pool indexed by the operands or vars. - pool []string - // vars is a list of variables names to be bound by this pattern - vars []string - // stacksize is the max depth of the stack - stacksize int - // tailLen is the length of the fixed-size segments after a deep wildcard - tailLen int - // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. - verb string -} - -// NewPattern returns a new Pattern from the given definition values. -// "ops" is a sequence of op codes. "pool" is a constant pool. -// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. -// "version" must be 1 for now. -// It returns an error if the given definition is invalid. 
-func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { - if version != 1 { - grpclog.Infof("unsupported version: %d", version) - return Pattern{}, ErrInvalidPattern - } - - l := len(ops) - if l%2 != 0 { - grpclog.Infof("odd number of ops codes: %d", l) - return Pattern{}, ErrInvalidPattern - } - - var ( - typedOps []op - stack, maxstack int - tailLen int - pushMSeen bool - vars []string - ) - for i := 0; i < l; i += 2 { - op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - if pushMSeen { - tailLen++ - } - stack++ - case utilities.OpPushM: - if pushMSeen { - grpclog.Infof("pushM appears twice") - return Pattern{}, ErrInvalidPattern - } - pushMSeen = true - stack++ - case utilities.OpLitPush: - if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("negative literal index: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - if pushMSeen { - tailLen++ - } - stack++ - case utilities.OpConcatN: - if op.operand <= 0 { - grpclog.Infof("negative concat size: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - stack -= op.operand - if stack < 0 { - grpclog.Print("stack underflow") - return Pattern{}, ErrInvalidPattern - } - stack++ - case utilities.OpCapture: - if op.operand < 0 || len(pool) <= op.operand { - grpclog.Infof("variable name index out of bound: %d", op.operand) - return Pattern{}, ErrInvalidPattern - } - v := pool[op.operand] - op.operand = len(vars) - vars = append(vars, v) - stack-- - if stack < 0 { - grpclog.Infof("stack underflow") - return Pattern{}, ErrInvalidPattern - } - default: - grpclog.Infof("invalid opcode: %d", op.code) - return Pattern{}, ErrInvalidPattern - } - - if maxstack < stack { - maxstack = stack - } - typedOps = append(typedOps, op) - } - return Pattern{ - ops: typedOps, - pool: pool, - vars: vars, - stacksize: maxstack, - tailLen: tailLen, - verb: verb, - }, nil -} - -// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. -func MustPattern(p Pattern, err error) Pattern { - if err != nil { - grpclog.Fatalf("Pattern initialization failed: %v", err) - } - return p -} - -// Match examines components if it matches to the Pattern. -// If it matches, the function returns a mapping from field paths to their captured values. -// If otherwise, the function returns an error. 
-func (p Pattern) Match(components []string, verb string) (map[string]string, error) { - if p.verb != verb { - return nil, ErrNotMatch - } - - var pos int - stack := make([]string, 0, p.stacksize) - captured := make([]string, len(p.vars)) - l := len(components) - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush, utilities.OpLitPush: - if pos >= l { - return nil, ErrNotMatch - } - c := components[pos] - if op.code == utilities.OpLitPush { - if lit := p.pool[op.operand]; c != lit { - return nil, ErrNotMatch - } - } - stack = append(stack, c) - pos++ - case utilities.OpPushM: - end := len(components) - if end < pos+p.tailLen { - return nil, ErrNotMatch - } - end -= p.tailLen - stack = append(stack, strings.Join(components[pos:end], "/")) - pos = end - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - captured[op.operand] = stack[n] - stack = stack[:n] - } - } - if pos < l { - return nil, ErrNotMatch - } - bindings := make(map[string]string) - for i, val := range captured { - bindings[p.vars[i]] = val - } - return bindings, nil -} - -// Verb returns the verb part of the Pattern. -func (p Pattern) Verb() string { return p.verb } - -func (p Pattern) String() string { - var stack []string - for _, op := range p.ops { - switch op.code { - case utilities.OpNop: - continue - case utilities.OpPush: - stack = append(stack, "*") - case utilities.OpLitPush: - stack = append(stack, p.pool[op.operand]) - case utilities.OpPushM: - stack = append(stack, "**") - case utilities.OpConcatN: - n := op.operand - l := len(stack) - n - stack = append(stack[:l], strings.Join(stack[l:], "/")) - case utilities.OpCapture: - n := len(stack) - 1 - stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) - } - } - segs := strings.Join(stack, "/") - if p.verb != "" { - return fmt.Sprintf("/%s:%s", segs, p.verb) - } - return "/" + segs -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go deleted file mode 100644 index a3151e2..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go +++ /dev/null @@ -1,80 +0,0 @@ -package runtime - -import ( - "github.com/golang/protobuf/proto" -) - -// StringP returns a pointer to a string whose pointee is same as the given string value. -func StringP(val string) (*string, error) { - return proto.String(val), nil -} - -// BoolP parses the given string representation of a boolean value, -// and returns a pointer to a bool whose value is same as the parsed value. -func BoolP(val string) (*bool, error) { - b, err := Bool(val) - if err != nil { - return nil, err - } - return proto.Bool(b), nil -} - -// Float64P parses the given string representation of a floating point number, -// and returns a pointer to a float64 whose value is same as the parsed number. -func Float64P(val string) (*float64, error) { - f, err := Float64(val) - if err != nil { - return nil, err - } - return proto.Float64(f), nil -} - -// Float32P parses the given string representation of a floating point number, -// and returns a pointer to a float32 whose value is same as the parsed number. 
-func Float32P(val string) (*float32, error) { - f, err := Float32(val) - if err != nil { - return nil, err - } - return proto.Float32(f), nil -} - -// Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. -func Int64P(val string) (*int64, error) { - i, err := Int64(val) - if err != nil { - return nil, err - } - return proto.Int64(i), nil -} - -// Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. -func Int32P(val string) (*int32, error) { - i, err := Int32(val) - if err != nil { - return nil, err - } - return proto.Int32(i), err -} - -// Uint64P parses the given string representation of an integer -// and returns a pointer to a uint64 whose value is same as the parsed integer. -func Uint64P(val string) (*uint64, error) { - i, err := Uint64(val) - if err != nil { - return nil, err - } - return proto.Uint64(i), err -} - -// Uint32P parses the given string representation of an integer -// and returns a pointer to a uint32 whose value is same as the parsed integer. -func Uint32P(val string) (*uint32, error) { - i, err := Uint32(val) - if err != nil { - return nil, err - } - return proto.Uint32(i), err -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go deleted file mode 100644 index b7fa32e..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go +++ /dev/null @@ -1,70 +0,0 @@ -package runtime - -import ( - "io" - "net/http" - - "context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. -type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) - -var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler - -// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a Status message marshaled by a Marshaler. -// -// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - // return Internal when Marshal failed - const fallback = `{"code": 13, "message": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatability - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { - pb := s.Proto() - contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - buf, merr := marshaler.Marshal(s.Proto()) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go deleted file mode 100644 index bb9359f..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ /dev/null @@ -1,392 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -// PopulateQueryParameters populates "values" into "msg". -// A value is ignored if its key starts with one of the elements in "filter". -func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - for key, values := range values { - re, err := regexp.Compile("^(.*)\\[(.*)\\]$") - if err != nil { - return err - } - match := re.FindStringSubmatch(key) - if len(match) == 3 { - key = match[1] - values = append([]string{match[2]}, values...) - } - fieldPath := strings.Split(key, ".") - if filter.HasCommonPrefix(fieldPath) { - continue - } - if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { - return err - } - } - return nil -} - -// PopulateFieldFromPath sets a value in a nested Protobuf structure. -// It instantiates missing protobuf fields as it goes. 
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { - fieldPath := strings.Split(fieldPathString, ".") - return populateFieldValueFromPath(msg, fieldPath, []string{value}) -} - -func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { - m := reflect.ValueOf(msg) - if m.Kind() != reflect.Ptr { - return fmt.Errorf("unexpected type %T: %v", msg, msg) - } - var props *proto.Properties - m = m.Elem() - for i, fieldName := range fieldPath { - isLast := i == len(fieldPath)-1 - if !isLast && m.Kind() != reflect.Struct { - return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) - } - var f reflect.Value - var err error - f, props, err = fieldByProtoName(m, fieldName) - if err != nil { - return err - } else if !f.IsValid() { - grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) - return nil - } - - switch f.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - m = f - case reflect.Slice: - if !isLast { - return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) - } - // Handle []byte - if f.Type().Elem().Kind() == reflect.Uint8 { - m = f - break - } - return populateRepeatedField(f, values, props) - case reflect.Ptr: - if f.IsNil() { - m = reflect.New(f.Type().Elem()) - f.Set(m.Convert(f.Type())) - } - m = f.Elem() - continue - case reflect.Struct: - m = f - continue - case reflect.Map: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - return populateMapField(f, values, props) - default: - return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) - } - } - switch len(values) { - case 0: - return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) - case 1: - default: - grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) - } - return populateField(m, values[0], props) -} - -// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". -// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { - props := proto.GetProperties(m.Type()) - - // look up field name in oneof map - if op, ok := props.OneofTypes[name]; ok { - v := reflect.New(op.Type.Elem()) - field := m.Field(op.Field) - if !field.IsNil() { - return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) - } - field.Set(v) - return v.Elem().Field(0), op.Prop, nil - } - - for _, p := range props.Prop { - if p.OrigName == name { - return m.FieldByName(p.Name), p, nil - } - if p.JSONName == name { - return m.FieldByName(p.Name), p, nil - } - } - return reflect.Value{}, nil, nil -} - -func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { - if len(values) != 2 { - return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) - } - - key, value := values[0], values[1] - keyType := f.Type().Key() - valueType := f.Type().Elem() - if f.IsNil() { - f.Set(reflect.MakeMap(f.Type())) - } - - keyConv, ok := convFromType[keyType.Kind()] - if !ok { - return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) - } - valueConv, ok := convFromType[valueType.Kind()] - if !ok { - return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) - } - - keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) - if err := keyV[1].Interface(); err != nil { - return err.(error) - } - valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := valueV[1].Interface(); err != nil { - return err.(error) - } - - f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) - - return nil -} - -func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { - elemType := f.Type().Elem() - - // is the destination field a slice of an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnumRepeated(f, values, enumValMap) - } - - conv, ok := convFromType[elemType.Kind()] - if !ok { - return fmt.Errorf("unsupported field type %s", elemType) - } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Index(i).Set(result[0].Convert(f.Index(i).Type())) - } - return nil -} - -func populateField(f reflect.Value, value string, props *proto.Properties) error { - i := f.Addr().Interface() - - // Handle protobuf well known types - type wkt interface { - XXX_WellKnownType() string - } - if wkt, ok := i.(wkt); ok { - switch wkt.XXX_WellKnownType() { - case "Timestamp": - if value == "null" { - f.Field(0).SetInt(0) - f.Field(1).SetInt(0) - return nil - } - - t, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - f.Field(0).SetInt(int64(t.Unix())) - f.Field(1).SetInt(int64(t.Nanosecond())) - return nil - case "Duration": - if value == "null" { - f.Field(0).SetInt(0) - f.Field(1).SetInt(0) - return nil - } - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - f.Field(0).SetInt(s) - f.Field(1).SetInt(ns) - return nil - case "DoubleValue": - fallthrough - case "FloatValue": - float64Val, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetFloat(float64Val) - return nil - case "Int64Value": - fallthrough - case "Int32Value": - int64Val, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetInt(int64Val) - return nil - case "UInt64Value": - fallthrough - case "UInt32Value": - uint64Val, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.Field(0).SetUint(uint64Val) - return nil - case "BoolValue": - if value == "true" { - f.Field(0).SetBool(true) - } else if value == "false" { - f.Field(0).SetBool(false) - } else { - return fmt.Errorf("bad BoolValue: %s", value) - } - return nil - case "StringValue": - f.Field(0).SetString(value) - return nil - case "BytesValue": - bytesVal, err := base64.StdEncoding.DecodeString(value) - if err != nil { - return fmt.Errorf("bad BytesValue: %s", value) - } - f.Field(0).SetBytes(bytesVal) - return nil - } - } - - // Handle google well known types - if gwkt, ok := i.(proto.Message); ok { - switch proto.MessageName(gwkt) { - case "google.protobuf.FieldMask": - p := f.Field(0) - for _, v := range strings.Split(value, ",") { - if v != "" { - p.Set(reflect.Append(p, reflect.ValueOf(v))) - } - } - return nil - } - } - - // Handle Time and Duration stdlib types - switch t := i.(type) { - case *time.Time: - pt, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - *t = pt - return nil - case *time.Duration: - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - *t = d - return nil - } - - // is the destination field an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnum(f, value, enumValMap) - } - - conv, ok := convFromType[f.Kind()] - if !ok { - return fmt.Errorf("field type %T is not supported in query parameters", i) - } - result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Set(result[0].Convert(f.Type())) - return nil -} - -func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { - // see if it's an enumeration string - if enumVal, ok := enumValMap[value]; ok { - return reflect.ValueOf(enumVal).Convert(t), nil - } - - // check for an integer that matches an enumeration value - eVal, err := strconv.Atoi(value) - if err != nil { - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) - } - for _, v := range enumValMap { - if v == int32(eVal) { - return reflect.ValueOf(eVal).Convert(t), nil - } - } - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) -} - -func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { - cval, err := convertEnum(value, f.Type(), enumValMap) - if err != nil { - return err - } - f.Set(cval) - return nil -} - -func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { - elemType := f.Type().Elem() - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result, err := convertEnum(v, elemType, enumValMap) - if err != nil { - return err - } - f.Index(i).Set(result) - } - return nil -} - -var ( - convFromType = map[reflect.Kind]reflect.Value{ - reflect.String: reflect.ValueOf(String), - reflect.Bool: reflect.ValueOf(Bool), - reflect.Float64: reflect.ValueOf(Float64), - reflect.Float32: reflect.ValueOf(Float32), - reflect.Int64: reflect.ValueOf(Int64), - reflect.Int32: reflect.ValueOf(Int32), - reflect.Uint64: reflect.ValueOf(Uint64), - reflect.Uint32: reflect.ValueOf(Uint32), - reflect.Slice: reflect.ValueOf(Bytes), - } -) diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel deleted file mode 100644 index 7109d79..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pattern.go", - "readerfactory.go", - "trie.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["trie_test.go"], - embed = [":go_default_library"], -) diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go deleted file mode 100644 index cf79a4d..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package utilities provides members for internal use in grpc-gateway. 
-package utilities diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go deleted file mode 100644 index dfe7de4..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go +++ /dev/null @@ -1,22 +0,0 @@ -package utilities - -// An OpCode is a opcode of compiled path patterns. -type OpCode int - -// These constants are the valid values of OpCode. -const ( - // OpNop does nothing - OpNop = OpCode(iota) - // OpPush pushes a component to stack - OpPush - // OpLitPush pushes a component to stack if it matches to the literal - OpLitPush - // OpPushM concatenates the remaining components and pushes it to stack - OpPushM - // OpConcatN pops N items from stack, concatenates them and pushes it back to stack - OpConcatN - // OpCapture pops an item and binds it to the variable - OpCapture - // OpEnd is the least positive invalid opcode. - OpEnd -) diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go deleted file mode 100644 index 6dd3854..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go +++ /dev/null @@ -1,20 +0,0 @@ -package utilities - -import ( - "bytes" - "io" - "io/ioutil" -) - -// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins -// at the start of the stream -func IOReaderFactory(r io.Reader) (func() io.Reader, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - return func() io.Reader { - return bytes.NewReader(b) - }, nil -} diff --git a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go deleted file mode 100644 index c2b7b30..0000000 --- a/cmd/bpa-operator/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go +++ /dev/null @@ -1,177 +0,0 @@ -package utilities - -import ( - "sort" -) - -// DoubleArray is a Double Array implementation of trie on sequences of strings. -type DoubleArray struct { - // Encoding keeps an encoding from string to int - Encoding map[string]int - // Base is the base array of Double Array - Base []int - // Check is the check array of Double Array - Check []int -} - -// NewDoubleArray builds a DoubleArray from a set of sequences of strings. 
-func NewDoubleArray(seqs [][]string) *DoubleArray { - da := &DoubleArray{Encoding: make(map[string]int)} - if len(seqs) == 0 { - return da - } - - encoded := registerTokens(da, seqs) - sort.Sort(byLex(encoded)) - - root := node{row: -1, col: -1, left: 0, right: len(encoded)} - addSeqs(da, encoded, 0, root) - - for i := len(da.Base); i > 0; i-- { - if da.Check[i-1] != 0 { - da.Base = da.Base[:i] - da.Check = da.Check[:i] - break - } - } - return da -} - -func registerTokens(da *DoubleArray, seqs [][]string) [][]int { - var result [][]int - for _, seq := range seqs { - var encoded []int - for _, token := range seq { - if _, ok := da.Encoding[token]; !ok { - da.Encoding[token] = len(da.Encoding) - } - encoded = append(encoded, da.Encoding[token]) - } - result = append(result, encoded) - } - for i := range result { - result[i] = append(result[i], len(da.Encoding)) - } - return result -} - -type node struct { - row, col int - left, right int -} - -func (n node) value(seqs [][]int) int { - return seqs[n.row][n.col] -} - -func (n node) children(seqs [][]int) []*node { - var result []*node - lastVal := int(-1) - last := new(node) - for i := n.left; i < n.right; i++ { - if lastVal == seqs[i][n.col+1] { - continue - } - last.right = i - last = &node{ - row: i, - col: n.col + 1, - left: i, - } - result = append(result, last) - } - last.right = n.right - return result -} - -func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { - ensureSize(da, pos) - - children := n.children(seqs) - var i int - for i = 1; ; i++ { - ok := func() bool { - for _, child := range children { - code := child.value(seqs) - j := i + code - ensureSize(da, j) - if da.Check[j] != 0 { - return false - } - } - return true - }() - if ok { - break - } - } - da.Base[pos] = i - for _, child := range children { - code := child.value(seqs) - j := i + code - da.Check[j] = pos + 1 - } - terminator := len(da.Encoding) - for _, child := range children { - code := child.value(seqs) - if code == terminator { - continue - } - j := i + code - addSeqs(da, seqs, j, *child) - } -} - -func ensureSize(da *DoubleArray, i int) { - for i >= len(da.Base) { - da.Base = append(da.Base, make([]int, len(da.Base)+1)...) - da.Check = append(da.Check, make([]int, len(da.Check)+1)...) - } -} - -type byLex [][]int - -func (l byLex) Len() int { return len(l) } -func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l byLex) Less(i, j int) bool { - si := l[i] - sj := l[j] - var k int - for k = 0; k < len(si) && k < len(sj); k++ { - if si[k] < sj[k] { - return true - } - if si[k] > sj[k] { - return false - } - } - if k < len(sj) { - return true - } - return false -} - -// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
-func (da *DoubleArray) HasCommonPrefix(seq []string) bool { - if len(da.Base) == 0 { - return false - } - - var i int - for _, t := range seq { - code, ok := da.Encoding[t] - if !ok { - break - } - j := da.Base[i] + code - if len(da.Check) <= j || da.Check[j] != i+1 { - break - } - i = j - } - j := da.Base[i] + len(da.Encoding) - if len(da.Check) <= j || da.Check[j] != i+1 { - return false - } - return true -} diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/.gitignore b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/.gitignore deleted file mode 100644 index 8365624..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/2q.go b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/2q.go deleted file mode 100644 index e474cd0..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/2q.go +++ /dev/null @@ -1,223 +0,0 @@ -package lru - -import ( - "fmt" - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -const ( - // Default2QRecentRatio is the ratio of the 2Q cache dedicated - // to recently added entries that have only been accessed once. - Default2QRecentRatio = 0.25 - - // Default2QGhostEntries is the default ratio of ghost - // entries kept to track entries recently evicted - Default2QGhostEntries = 0.50 -) - -// TwoQueueCache is a thread-safe fixed size 2Q cache. -// 2Q is an enhancement over the standard LRU cache -// in that it tracks both frequently and recently used -// entries separately. This avoids a burst in access to new -// entries from evicting frequently used entries. It adds some -// additional tracking overhead to the standard LRU cache, and is -// computationally about 2x the cost, and adds some metadata over -// head. The ARCCache is similar, but does not require setting any -// parameters. -type TwoQueueCache struct { - size int - recentSize int - - recent simplelru.LRUCache - frequent simplelru.LRUCache - recentEvict simplelru.LRUCache - lock sync.RWMutex -} - -// New2Q creates a new TwoQueueCache using the default -// values for the parameters. -func New2Q(size int) (*TwoQueueCache, error) { - return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) -} - -// New2QParams creates a new TwoQueueCache using the provided -// parameter values. 
-func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { - if size <= 0 { - return nil, fmt.Errorf("invalid size") - } - if recentRatio < 0.0 || recentRatio > 1.0 { - return nil, fmt.Errorf("invalid recent ratio") - } - if ghostRatio < 0.0 || ghostRatio > 1.0 { - return nil, fmt.Errorf("invalid ghost ratio") - } - - // Determine the sub-sizes - recentSize := int(float64(size) * recentRatio) - evictSize := int(float64(size) * ghostRatio) - - // Allocate the LRUs - recent, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - frequent, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - recentEvict, err := simplelru.NewLRU(evictSize, nil) - if err != nil { - return nil, err - } - - // Initialize the cache - c := &TwoQueueCache{ - size: size, - recentSize: recentSize, - recent: recent, - frequent: frequent, - recentEvict: recentEvict, - } - return c, nil -} - -// Get looks up a key's value from the cache. -func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if this is a frequent value - if val, ok := c.frequent.Get(key); ok { - return val, ok - } - - // If the value is contained in recent, then we - // promote it to frequent - if val, ok := c.recent.Peek(key); ok { - c.recent.Remove(key) - c.frequent.Add(key, val) - return val, ok - } - - // No hit - return nil, false -} - -// Add adds a value to the cache. -func (c *TwoQueueCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is frequently used already, - // and just update the value - if c.frequent.Contains(key) { - c.frequent.Add(key, value) - return - } - - // Check if the value is recently used, and promote - // the value into the frequent list - if c.recent.Contains(key) { - c.recent.Remove(key) - c.frequent.Add(key, value) - return - } - - // If the value was recently evicted, add it to the - // frequently used list - if c.recentEvict.Contains(key) { - c.ensureSpace(true) - c.recentEvict.Remove(key) - c.frequent.Add(key, value) - return - } - - // Add to the recently seen list - c.ensureSpace(false) - c.recent.Add(key, value) - return -} - -// ensureSpace is used to ensure we have space in the cache -func (c *TwoQueueCache) ensureSpace(recentEvict bool) { - // If we have space, nothing to do - recentLen := c.recent.Len() - freqLen := c.frequent.Len() - if recentLen+freqLen < c.size { - return - } - - // If the recent buffer is larger than - // the target, evict from there - if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { - k, _, _ := c.recent.RemoveOldest() - c.recentEvict.Add(k, nil) - return - } - - // Remove from the frequent list otherwise - c.frequent.RemoveOldest() -} - -// Len returns the number of items in the cache. -func (c *TwoQueueCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.recent.Len() + c.frequent.Len() -} - -// Keys returns a slice of the keys in the cache. -// The frequently used keys are first in the returned slice. -func (c *TwoQueueCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.frequent.Keys() - k2 := c.recent.Keys() - return append(k1, k2...) -} - -// Remove removes the provided key from the cache. 
-func (c *TwoQueueCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.frequent.Remove(key) { - return - } - if c.recent.Remove(key) { - return - } - if c.recentEvict.Remove(key) { - return - } -} - -// Purge is used to completely clear the cache. -func (c *TwoQueueCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.recent.Purge() - c.frequent.Purge() - c.recentEvict.Purge() -} - -// Contains is used to check if the cache contains a key -// without updating recency or frequency. -func (c *TwoQueueCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.frequent.Contains(key) || c.recent.Contains(key) -} - -// Peek is used to inspect the cache value of a key -// without updating recency or frequency. -func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.frequent.Peek(key); ok { - return val, ok - } - return c.recent.Peek(key) -} diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/LICENSE b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/LICENSE deleted file mode 100644 index be2cc4d..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/LICENSE +++ /dev/null @@ -1,362 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. "Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. 
"Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. 
Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. 
Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. 
Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/README.md b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/README.md deleted file mode 100644 index 33e58cf..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/README.md +++ /dev/null @@ -1,25 +0,0 @@ -golang-lru -========== - -This provides the `lru` package which implements a fixed-size -thread safe LRU cache. It is based on the cache in Groupcache. 
- -Documentation -============= - -Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) - -Example -======= - -Using the LRU is very simple: - -```go -l, _ := New(128) -for i := 0; i < 256; i++ { - l.Add(i, nil) -} -if l.Len() != 128 { - panic(fmt.Sprintf("bad len: %v", l.Len())) -} -``` diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/arc.go b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/arc.go deleted file mode 100644 index 555225a..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/arc.go +++ /dev/null @@ -1,257 +0,0 @@ -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). -// ARC is an enhancement over the standard LRU cache in that tracks both -// frequency and recency of use. This avoids a burst in access to new -// entries from evicting the frequently used older entries. It adds some -// additional tracking overhead to a standard LRU cache, computationally -// it is roughly 2x the cost, and the extra memory overhead is linear -// with the size of the cache. ARC has been patented by IBM, but is -// similar to the TwoQueueCache (2Q) which requires setting parameters. -type ARCCache struct { - size int // Size is the total capacity of the cache - p int // P is the dynamic preference towards T1 or T2 - - t1 simplelru.LRUCache // T1 is the LRU for recently accessed items - b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 - - t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items - b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 - - lock sync.RWMutex -} - -// NewARC creates an ARC of the given size -func NewARC(size int) (*ARCCache, error) { - // Create the sub LRUs - b1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - b2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t1, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - t2, err := simplelru.NewLRU(size, nil) - if err != nil { - return nil, err - } - - // Initialize the ARC - c := &ARCCache{ - size: size, - p: 0, - t1: t1, - b1: b1, - t2: t2, - b2: b2, - } - return c, nil -} - -// Get looks up a key's value from the cache. -func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { - c.lock.Lock() - defer c.lock.Unlock() - - // If the value is contained in T1 (recent), then - // promote it to T2 (frequent) - if val, ok := c.t1.Peek(key); ok { - c.t1.Remove(key) - c.t2.Add(key, val) - return val, ok - } - - // Check if the value is contained in T2 (frequent) - if val, ok := c.t2.Get(key); ok { - return val, ok - } - - // No hit - return nil, false -} - -// Add adds a value to the cache. 
-func (c *ARCCache) Add(key, value interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - - // Check if the value is contained in T1 (recent), and potentially - // promote it to frequent T2 - if c.t1.Contains(key) { - c.t1.Remove(key) - c.t2.Add(key, value) - return - } - - // Check if the value is already in T2 (frequent) and update it - if c.t2.Contains(key) { - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // recently used list - if c.b1.Contains(key) { - // T1 set is too small, increase P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b2Len > b1Len { - delta = b2Len / b1Len - } - if c.p+delta >= c.size { - c.p = c.size - } else { - c.p += delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Remove from B1 - c.b1.Remove(key) - - // Add the key to the frequently used list - c.t2.Add(key, value) - return - } - - // Check if this value was recently evicted as part of the - // frequently used list - if c.b2.Contains(key) { - // T2 set is too small, decrease P appropriately - delta := 1 - b1Len := c.b1.Len() - b2Len := c.b2.Len() - if b1Len > b2Len { - delta = b1Len / b2Len - } - if delta >= c.p { - c.p = 0 - } else { - c.p -= delta - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(true) - } - - // Remove from B2 - c.b2.Remove(key) - - // Add the key to the frequently used list - c.t2.Add(key, value) - return - } - - // Potentially need to make room in the cache - if c.t1.Len()+c.t2.Len() >= c.size { - c.replace(false) - } - - // Keep the size of the ghost buffers trim - if c.b1.Len() > c.size-c.p { - c.b1.RemoveOldest() - } - if c.b2.Len() > c.p { - c.b2.RemoveOldest() - } - - // Add to the recently seen list - c.t1.Add(key, value) - return -} - -// replace is used to adaptively evict from either T1 or T2 -// based on the current learned value of P -func (c *ARCCache) replace(b2ContainsKey bool) { - t1Len := c.t1.Len() - if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { - k, _, ok := c.t1.RemoveOldest() - if ok { - c.b1.Add(k, nil) - } - } else { - k, _, ok := c.t2.RemoveOldest() - if ok { - c.b2.Add(k, nil) - } - } -} - -// Len returns the number of cached entries -func (c *ARCCache) Len() int { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Len() + c.t2.Len() -} - -// Keys returns all the cached keys -func (c *ARCCache) Keys() []interface{} { - c.lock.RLock() - defer c.lock.RUnlock() - k1 := c.t1.Keys() - k2 := c.t2.Keys() - return append(k1, k2...) -} - -// Remove is used to purge a key from the cache -func (c *ARCCache) Remove(key interface{}) { - c.lock.Lock() - defer c.lock.Unlock() - if c.t1.Remove(key) { - return - } - if c.t2.Remove(key) { - return - } - if c.b1.Remove(key) { - return - } - if c.b2.Remove(key) { - return - } -} - -// Purge is used to clear the cache -func (c *ARCCache) Purge() { - c.lock.Lock() - defer c.lock.Unlock() - c.t1.Purge() - c.t2.Purge() - c.b1.Purge() - c.b2.Purge() -} - -// Contains is used to check if the cache contains a key -// without updating recency or frequency. -func (c *ARCCache) Contains(key interface{}) bool { - c.lock.RLock() - defer c.lock.RUnlock() - return c.t1.Contains(key) || c.t2.Contains(key) -} - -// Peek is used to inspect the cache value of a key -// without updating recency or frequency. 
-func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { - c.lock.RLock() - defer c.lock.RUnlock() - if val, ok := c.t1.Peek(key); ok { - return val, ok - } - return c.t2.Peek(key) -} diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/doc.go b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/doc.go deleted file mode 100644 index 2547df9..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package lru provides three different LRU caches of varying sophistication. -// -// Cache is a simple LRU cache. It is based on the -// LRU implementation in groupcache: -// https://github.com/golang/groupcache/tree/master/lru -// -// TwoQueueCache tracks frequently used and recently used entries separately. -// This avoids a burst of accesses from taking out frequently used entries, -// at the cost of about 2x computational overhead and some extra bookkeeping. -// -// ARCCache is an adaptive replacement cache. It tracks recent evictions as -// well as recent usage in both the frequent and recent caches. Its -// computational overhead is comparable to TwoQueueCache, but the memory -// overhead is linear with the size of the cache. -// -// ARC has been patented by IBM, so do not use it if that is problematic for -// your program. -// -// All caches in this package take locks while operating, and are therefore -// thread-safe for consumers. -package lru diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/go.mod b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/go.mod deleted file mode 100644 index 824cb97..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/hashicorp/golang-lru diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/lru.go b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/lru.go deleted file mode 100644 index 1cbe04b..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/lru.go +++ /dev/null @@ -1,116 +0,0 @@ -package lru - -import ( - "sync" - - "github.com/hashicorp/golang-lru/simplelru" -) - -// Cache is a thread-safe fixed size LRU cache. -type Cache struct { - lru simplelru.LRUCache - lock sync.RWMutex -} - -// New creates an LRU of the given size. -func New(size int) (*Cache, error) { - return NewWithEvict(size, nil) -} - -// NewWithEvict constructs a fixed size cache with the given eviction -// callback. -func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { - lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) - if err != nil { - return nil, err - } - c := &Cache{ - lru: lru, - } - return c, nil -} - -// Purge is used to completely clear the cache. -func (c *Cache) Purge() { - c.lock.Lock() - c.lru.Purge() - c.lock.Unlock() -} - -// Add adds a value to the cache. Returns true if an eviction occurred. -func (c *Cache) Add(key, value interface{}) (evicted bool) { - c.lock.Lock() - evicted = c.lru.Add(key, value) - c.lock.Unlock() - return evicted -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { - c.lock.Lock() - value, ok = c.lru.Get(key) - c.lock.Unlock() - return value, ok -} - -// Contains checks if a key is in the cache, without updating the -// recent-ness or deleting it for being stale. 
-func (c *Cache) Contains(key interface{}) bool { - c.lock.RLock() - containKey := c.lru.Contains(key) - c.lock.RUnlock() - return containKey -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { - c.lock.RLock() - value, ok = c.lru.Peek(key) - c.lock.RUnlock() - return value, ok -} - -// ContainsOrAdd checks if a key is in the cache without updating the -// recent-ness or deleting it for being stale, and if not, adds the value. -// Returns whether found and whether an eviction occurred. -func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { - c.lock.Lock() - defer c.lock.Unlock() - - if c.lru.Contains(key) { - return true, false - } - evicted = c.lru.Add(key, value) - return false, evicted -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key interface{}) { - c.lock.Lock() - c.lru.Remove(key) - c.lock.Unlock() -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - c.lock.Lock() - c.lru.RemoveOldest() - c.lock.Unlock() -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *Cache) Keys() []interface{} { - c.lock.RLock() - keys := c.lru.Keys() - c.lock.RUnlock() - return keys -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - c.lock.RLock() - length := c.lru.Len() - c.lock.RUnlock() - return length -} diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go deleted file mode 100644 index 5673773..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ /dev/null @@ -1,161 +0,0 @@ -package simplelru - -import ( - "container/list" - "errors" -) - -// EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) - -// LRU implements a non-thread safe fixed size LRU cache -type LRU struct { - size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} -} - -// NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { - if size <= 0 { - return nil, errors.New("Must provide a positive size") - } - c := &LRU{ - size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), - onEvict: onEvict, - } - return c, nil -} - -// Purge is used to completely clear the cache. -func (c *LRU) Purge() { - for k, v := range c.items { - if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) - } - delete(c.items, k) - } - c.evictList.Init() -} - -// Add adds a value to the cache. Returns true if an eviction occurred. -func (c *LRU) Add(key, value interface{}) (evicted bool) { - // Check for existing item - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value - return false - } - - // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry - - evict := c.evictList.Len() > c.size - // Verify size not exceeded - if evict { - c.removeOldest() - } - return evict -} - -// Get looks up a key's value from the cache. 
-func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - return ent.Value.(*entry).value, true - } - return -} - -// Contains checks if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. -func (c *LRU) Contains(key interface{}) (ok bool) { - _, ok = c.items[key] - return ok -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. -func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - var ent *list.Element - if ent, ok = c.items[key]; ok { - return ent.Value.(*entry).value, true - } - return nil, ok -} - -// Remove removes the provided key from the cache, returning if the -// key was contained. -func (c *LRU) Remove(key interface{}) (present bool) { - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - return true - } - return false -} - -// RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *LRU) Keys() []interface{} { - keys := make([]interface{}, len(c.items)) - i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key - i++ - } - return keys -} - -// Len returns the number of items in the cache. -func (c *LRU) Len() int { - return c.evictList.Len() -} - -// removeOldest removes the oldest item from the cache. -func (c *LRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - } -} - -// removeElement is used to remove a given list element from the cache -func (c *LRU) removeElement(e *list.Element) { - c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) - if c.onEvict != nil { - c.onEvict(kv.key, kv.value) - } -} diff --git a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go deleted file mode 100644 index 74c7077..0000000 --- a/cmd/bpa-operator/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ /dev/null @@ -1,36 +0,0 @@ -package simplelru - -// LRUCache is the interface for simple LRU cache. -type LRUCache interface { - // Adds a value to the cache, returns true if an eviction occurred and - // updates the "recently used"-ness of the key. - Add(key, value interface{}) bool - - // Returns key's value from the cache and - // updates the "recently used"-ness of the key. #value, isFound - Get(key interface{}) (value interface{}, ok bool) - - // Check if a key exsists in cache without updating the recent-ness. - Contains(key interface{}) (ok bool) - - // Returns key's value without updating the "recently used"-ness of the key. - Peek(key interface{}) (value interface{}, ok bool) - - // Removes a key from the cache. - Remove(key interface{}) bool - - // Removes the oldest entry from cache. - RemoveOldest() (interface{}, interface{}, bool) - - // Returns the oldest entry from the cache. 
#key, value, isFound - GetOldest() (interface{}, interface{}, bool) - - // Returns a slice of the keys in the cache, from oldest to newest. - Keys() []interface{} - - // Returns the number of items in the cache. - Len() int - - // Clear all cache entries - Purge() -} diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/.gitignore b/cmd/bpa-operator/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c341..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/.travis.yml b/cmd/bpa-operator/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index b13a50e..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/cmd/bpa-operator/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b449..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
- -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/LICENSE b/cmd/bpa-operator/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 6866802..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/README.md b/cmd/bpa-operator/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index 02fc81e..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,238 +0,0 @@ -# Mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -[![GoDoc][3]][4] -[![GoCard][5]][6] -[![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge - -### Latest release - -[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). - -### Important note - -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. - -If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. 
:heart_eyes: - -Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) -Donate using Liberapay - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - -## Installation - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. 
- -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). - -### Nice example - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v2 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransfomer struct { -} - -func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). 
- -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/doc.go b/cmd/bpa-operator/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index 6e9aa7b..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. - -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Usage - -From my own work-in-progress project: - - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 - } - - type FssnConfig struct { - Network networkConfig - } - - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, - } - - // Inside a function [...] - - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) - } - - // More code [...] - -*/ -package mergo diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/map.go b/cmd/bpa-operator/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index 3f5afa8..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. 
- -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. 
-// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) -} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/merge.go b/cmd/bpa-operator/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index f8de6c5..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasExportedField(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasExportedField(dst.Field(i)) - } else { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers - overwriteWithEmptyValue bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - overwriteWithEmptySrc := config.overwriteWithEmptyValue - config.overwriteWithEmptyValue = false - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - - if config.Transformers != nil && !isEmptyValue(dst) { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasExportedField(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } - dst.SetMapIndex(key, dstSlice) - } - } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue - } - - if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if src.IsNil() { - break - } - if src.Kind() != reflect.Interface { - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } 
else { - return ErrDifferentArgumentsTypes - } - break - } - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - default: - if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } - return -} - -// Merge will fill any empty value-type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will recursively merge any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing you to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithAppendSlice will make merge append slices instead of overwriting them -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/cmd/bpa-operator/vendor/github.com/imdario/mergo/mergo.go b/cmd/bpa-operator/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index a82fea2..0000000 --- a/cmd/bpa-operator/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them.
-// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - return isEmptyValue(v.Elem()) - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/cmd/bpa-operator/vendor/github.com/joho/godotenv/.gitignore b/cmd/bpa-operator/vendor/github.com/joho/godotenv/.gitignore deleted file mode 100644 index e43b0f9..0000000 --- a/cmd/bpa-operator/vendor/github.com/joho/godotenv/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.DS_Store diff --git a/cmd/bpa-operator/vendor/github.com/joho/godotenv/.travis.yml b/cmd/bpa-operator/vendor/github.com/joho/godotenv/.travis.yml deleted file mode 100644 index f0db1ad..0000000 --- a/cmd/bpa-operator/vendor/github.com/joho/godotenv/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.x - -os: - - linux - - osx diff --git a/cmd/bpa-operator/vendor/github.com/joho/godotenv/LICENCE b/cmd/bpa-operator/vendor/github.com/joho/godotenv/LICENCE deleted file mode 100644 index e7ddd51..0000000 --- a/cmd/bpa-operator/vendor/github.com/joho/godotenv/LICENCE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2013 John Barton - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/cmd/bpa-operator/vendor/github.com/joho/godotenv/README.md b/cmd/bpa-operator/vendor/github.com/joho/godotenv/README.md deleted file mode 100644 index 4e8fcf2..0000000 --- a/cmd/bpa-operator/vendor/github.com/joho/godotenv/README.md +++ /dev/null @@ -1,163 +0,0 @@ -# GoDotEnv [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4?svg=true)](https://ci.appveyor.com/project/joho/godotenv) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv) - -A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file) - -From the original library: - -> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. -> -> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv loads variables from a .env file into ENV when the environment is bootstrapped. - -It can be used as a library (for loading in env for your own daemons etc) or as a bin command. - -There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows.
- -## Installation - -As a library - -```shell -go get github.com/joho/godotenv -``` - -or if you want to use it as a bin command -```shell -go get github.com/joho/godotenv/cmd/godotenv -``` - -## Usage - -Add your application configuration to your `.env` file in the root of your project: - -```shell -S3_BUCKET=YOURS3BUCKET -SECRET_KEY=YOURSECRETKEYGOESHERE -``` - -Then in your Go app you can do something like - -```go -package main - -import ( - "github.com/joho/godotenv" - "log" - "os" -) - -func main() { - err := godotenv.Load() - if err != nil { - log.Fatal("Error loading .env file") - } - - s3Bucket := os.Getenv("S3_BUCKET") - secretKey := os.Getenv("SECRET_KEY") - - // now do something with s3 or whatever -} -``` - -If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import - -```go -import _ "github.com/joho/godotenv/autoload" -``` - -While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit - -```go -_ = godotenv.Load("somerandomfile") -_ = godotenv.Load("filenumberone.env", "filenumbertwo.env") -``` - -If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) - -```shell -# I am a comment and that is OK -SOME_VAR=someval -FOO=BAR # comments at line end are OK too -export BAR=BAZ -``` - -Or finally you can do YAML(ish) style - -```yaml -FOO: bar -BAR: baz -``` - -as a final aside, if you don't want godotenv munging your env you can just get a map back instead - -```go -var myEnv map[string]string -myEnv, err := godotenv.Read() - -s3Bucket := myEnv["S3_BUCKET"] -``` - -... or from an `io.Reader` instead of a local file - -```go -reader := getRemoteFile() -myEnv, err := godotenv.Parse(reader) -``` - -... or from a `string` if you so desire - -```go -content := getRemoteFileContent() -myEnv, err := godotenv.Unmarshal(content) -``` - -### Command Mode - -Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` - -``` -godotenv -f /some/path/to/.env some_command with some args -``` - -If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` - -### Writing Env Files - -Godotenv can also write a map representing the environment to a correctly-formatted and escaped file - -```go -env, err := godotenv.Unmarshal("KEY=value") -err := godotenv.Write(env, "./.env") -``` - -... or to a string - -```go -env, err := godotenv.Unmarshal("KEY=value") -content, err := godotenv.Marshal(env) -``` - -## Contributing - -Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases. - -*code changes without tests will not be accepted* - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Added some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Releases - -Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. - -Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1` - -## CI - -Linux: [![Build Status](https://travis-ci.org/joho/godotenv.svg?branch=master)](https://travis-ci.org/joho/godotenv) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv) - -## Who? 
- -The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library. diff --git a/cmd/bpa-operator/vendor/github.com/joho/godotenv/godotenv.go b/cmd/bpa-operator/vendor/github.com/joho/godotenv/godotenv.go deleted file mode 100644 index 29b436c..0000000 --- a/cmd/bpa-operator/vendor/github.com/joho/godotenv/godotenv.go +++ /dev/null @@ -1,346 +0,0 @@ -// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) -// -// Examples/readme can be found on the github page at https://github.com/joho/godotenv -// -// The TL;DR is that you make a .env file that looks something like -// -// SOME_ENV_VAR=somevalue -// -// and then in your go code you can call -// -// godotenv.Load() -// -// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") -package godotenv - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "os/exec" - "regexp" - "sort" - "strings" -) - -const doubleQuoteSpecialChars = "\\\n\r\"!$`" - -// Load will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Load without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Load("fileone", "filetwo") -// -// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults -func Load(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, false) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Overload will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main) -// -// If you call Overload without any args it will default to loading .env in the current path -// -// You can otherwise tell it which files to load (there can be more than one) like -// -// godotenv.Overload("fileone", "filetwo") -// -// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. -func Overload(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, true) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Read all env (with same file loading semantics as Load) but return values as -// a map rather than automatically writing values into env -func Read(filenames ...string) (envMap map[string]string, err error) { - filenames = filenamesOrDefault(filenames) - envMap = make(map[string]string) - - for _, filename := range filenames { - individualEnvMap, individualErr := readFile(filename) - - if individualErr != nil { - err = individualErr - return // return early on a spazout - } - - for key, value := range individualEnvMap { - envMap[key] = value - } - } - - return -} - -// Parse reads an env file from io.Reader, returning a map of keys and values. 
-func Parse(r io.Reader) (envMap map[string]string, err error) { - envMap = make(map[string]string) - - var lines []string - scanner := bufio.NewScanner(r) - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - - if err = scanner.Err(); err != nil { - return - } - - for _, fullLine := range lines { - if !isIgnoredLine(fullLine) { - var key, value string - key, value, err = parseLine(fullLine, envMap) - - if err != nil { - return - } - envMap[key] = value - } - } - return -} - -//Unmarshal reads an env file from a string, returning a map of keys and values. -func Unmarshal(str string) (envMap map[string]string, err error) { - return Parse(strings.NewReader(str)) -} - -// Exec loads env vars from the specified filenames (empty map falls back to default) -// then executes the cmd specified. -// -// Simply hooks up os.Stdin/err/out to the command and calls Run() -// -// If you want more fine grained control over your command it's recommended -// that you use `Load()` or `Read()` and the `os/exec` package yourself. -func Exec(filenames []string, cmd string, cmdArgs []string) error { - Load(filenames...) - - command := exec.Command(cmd, cmdArgs...) - command.Stdin = os.Stdin - command.Stdout = os.Stdout - command.Stderr = os.Stderr - return command.Run() -} - -// Write serializes the given environment and writes it to a file -func Write(envMap map[string]string, filename string) error { - content, error := Marshal(envMap) - if error != nil { - return error - } - file, error := os.Create(filename) - if error != nil { - return error - } - _, err := file.WriteString(content) - return err -} - -// Marshal outputs the given environment as a dotenv-formatted environment file. -// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
-func Marshal(envMap map[string]string) (string, error) { - lines := make([]string, 0, len(envMap)) - for k, v := range envMap { - lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) - } - sort.Strings(lines) - return strings.Join(lines, "\n"), nil -} - -func filenamesOrDefault(filenames []string) []string { - if len(filenames) == 0 { - return []string{".env"} - } - return filenames -} - -func loadFile(filename string, overload bool) error { - envMap, err := readFile(filename) - if err != nil { - return err - } - - currentEnv := map[string]bool{} - rawEnv := os.Environ() - for _, rawEnvLine := range rawEnv { - key := strings.Split(rawEnvLine, "=")[0] - currentEnv[key] = true - } - - for key, value := range envMap { - if !currentEnv[key] || overload { - os.Setenv(key, value) - } - } - - return nil -} - -func readFile(filename string) (envMap map[string]string, err error) { - file, err := os.Open(filename) - if err != nil { - return - } - defer file.Close() - - return Parse(file) -} - -func parseLine(line string, envMap map[string]string) (key string, value string, err error) { - if len(line) == 0 { - err = errors.New("zero length string") - return - } - - // ditch the comments (but keep quoted hashes) - if strings.Contains(line, "#") { - segmentsBetweenHashes := strings.Split(line, "#") - quotesAreOpen := false - var segmentsToKeep []string - for _, segment := range segmentsBetweenHashes { - if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { - if quotesAreOpen { - quotesAreOpen = false - segmentsToKeep = append(segmentsToKeep, segment) - } else { - quotesAreOpen = true - } - } - - if len(segmentsToKeep) == 0 || quotesAreOpen { - segmentsToKeep = append(segmentsToKeep, segment) - } - } - - line = strings.Join(segmentsToKeep, "#") - } - - firstEquals := strings.Index(line, "=") - firstColon := strings.Index(line, ":") - splitString := strings.SplitN(line, "=", 2) - if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) { - //this is a yaml-style line - splitString = strings.SplitN(line, ":", 2) - } - - if len(splitString) != 2 { - err = errors.New("Can't separate key from value") - return - } - - // Parse the key - key = splitString[0] - if strings.HasPrefix(key, "export") { - key = strings.TrimPrefix(key, "export") - } - key = strings.Trim(key, " ") - - // Parse the value - value = parseValue(splitString[1], envMap) - return -} - -func parseValue(value string, envMap map[string]string) string { - - // trim - value = strings.Trim(value, " ") - - // check if we've got quoted values or possible escapes - if len(value) > 1 { - rs := regexp.MustCompile(`\A'(.*)'\z`) - singleQuotes := rs.FindStringSubmatch(value) - - rd := regexp.MustCompile(`\A"(.*)"\z`) - doubleQuotes := rd.FindStringSubmatch(value) - - if singleQuotes != nil || doubleQuotes != nil { - // pull the quotes off the edges - value = value[1 : len(value)-1] - } - - if doubleQuotes != nil { - // expand newlines - escapeRegex := regexp.MustCompile(`\\.`) - value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string { - c := strings.TrimPrefix(match, `\`) - switch c { - case "n": - return "\n" - case "r": - return "\r" - default: - return match - } - }) - // unescape characters - e := regexp.MustCompile(`\\([^$])`) - value = e.ReplaceAllString(value, "$1") - } - - if singleQuotes == nil { - value = expandVariables(value, envMap) - } - } - - return value -} - -func expandVariables(v string, m map[string]string) string { - r := 
regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) - - return r.ReplaceAllStringFunc(v, func(s string) string { - submatch := r.FindStringSubmatch(s) - - if submatch == nil { - return s - } - if submatch[1] == "\\" || submatch[2] == "(" { - return submatch[0][1:] - } else if submatch[4] != "" { - return m[submatch[4]] - } - return s - }) -} - -func isIgnoredLine(line string) bool { - trimmedLine := strings.Trim(line, " \n\t") - return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") -} - -func doubleQuoteEscape(line string) string { - for _, c := range doubleQuoteSpecialChars { - toReplace := "\\" + string(c) - if c == '\n' { - toReplace = `\n` - } - if c == '\r' { - toReplace = `\r` - } - line = strings.Replace(line, string(c), toReplace, -1) - } - return line -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/.codecov.yml b/cmd/bpa-operator/vendor/github.com/json-iterator/go/.codecov.yml deleted file mode 100644 index 955dc0b..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/.codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -ignore: - - "output_tests/.*" - diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/.gitignore b/cmd/bpa-operator/vendor/github.com/json-iterator/go/.gitignore deleted file mode 100644 index 1555653..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/vendor -/bug_test.go -/coverage.txt -/.idea diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/.travis.yml b/cmd/bpa-operator/vendor/github.com/json-iterator/go/.travis.yml deleted file mode 100644 index 449e67c..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.8.x - - 1.x - -before_install: - - go get -t -v ./... - -script: - - ./test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/Gopkg.lock b/cmd/bpa-operator/vendor/github.com/json-iterator/go/Gopkg.lock deleted file mode 100644 index c8a9fbb..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/Gopkg.lock +++ /dev/null @@ -1,21 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - -[[projects]] - name = "github.com/modern-go/reflect2" - packages = ["."] - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/Gopkg.toml b/cmd/bpa-operator/vendor/github.com/json-iterator/go/Gopkg.toml deleted file mode 100644 index 313a0f8..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - -ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] - -[[constraint]] - name = "github.com/modern-go/reflect2" - version = "1.0.1" diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/LICENSE b/cmd/bpa-operator/vendor/github.com/json-iterator/go/LICENSE deleted file mode 100644 index 2cf4f5a..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 json-iterator - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/README.md b/cmd/bpa-operator/vendor/github.com/json-iterator/go/README.md deleted file mode 100644 index 50d56ff..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/README.md +++ /dev/null @@ -1,87 +0,0 @@ -[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) -[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) -[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) -[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) -[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) -[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) - -A high-performance, 100% compatible drop-in replacement for "encoding/json" - -You can also use thrift-like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - -# Benchmark - -![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) - -Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go - -Raw results (easyjson requires static code generation): - -| | ns/op | allocation bytes | allocation times | -| --- | --- | --- | --- | -| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | -| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | -| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | -| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | -| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | -| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | - -Always benchmark with your own workload. -The result depends heavily on the data input. - -# Usage - -100% compatibility with the standard library - -Replace - -```go -import "encoding/json" -json.Marshal(&data) -``` - -with - -```go -import "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary -json.Marshal(&data) -``` - -Replace - -```go -import "encoding/json" -json.Unmarshal(input, &data) -``` - -with - -```go -import "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary -json.Unmarshal(input, &data) -``` - -[More documentation](http://jsoniter.com/migrate-from-go-std.html) - -# How to get - -``` -go get github.com/json-iterator/go -``` - -# Contributions Welcome!
- -Contributors - -* [thockin](https://github.com/thockin) -* [mattn](https://github.com/mattn) -* [cch123](https://github.com/cch123) -* [Oleg Shaldybin](https://github.com/olegshaldybin) -* [Jason Toffaletti](https://github.com/toffaletti) - -Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/adapter.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/adapter.go deleted file mode 100644 index e674d0f..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/adapter.go +++ /dev/null @@ -1,150 +0,0 @@ -package jsoniter - -import ( - "bytes" - "io" -) - -// RawMessage to make replace json with jsoniter -type RawMessage []byte - -// Unmarshal adapts to json/encoding Unmarshal API -// -// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. -// Refer to https://godoc.org/encoding/json#Unmarshal for more information -func Unmarshal(data []byte, v interface{}) error { - return ConfigDefault.Unmarshal(data, v) -} - -// UnmarshalFromString convenient method to read from string instead of []byte -func UnmarshalFromString(str string, v interface{}) error { - return ConfigDefault.UnmarshalFromString(str, v) -} - -// Get quick method to get value from deeply nested JSON structure -func Get(data []byte, path ...interface{}) Any { - return ConfigDefault.Get(data, path...) -} - -// Marshal adapts to json/encoding Marshal API -// -// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API -// Refer to https://godoc.org/encoding/json#Marshal for more information -func Marshal(v interface{}) ([]byte, error) { - return ConfigDefault.Marshal(v) -} - -// MarshalIndent same as json.MarshalIndent. Prefix is not supported. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - return ConfigDefault.MarshalIndent(v, prefix, indent) -} - -// MarshalToString convenient method to write as string instead of []byte -func MarshalToString(v interface{}) (string, error) { - return ConfigDefault.MarshalToString(v) -} - -// NewDecoder adapts to json/stream NewDecoder API. -// -// NewDecoder returns a new decoder that reads from r. -// -// Instead of a json/encoding Decoder, an Decoder is returned -// Refer to https://godoc.org/encoding/json#NewDecoder for more information -func NewDecoder(reader io.Reader) *Decoder { - return ConfigDefault.NewDecoder(reader) -} - -// Decoder reads and decodes JSON values from an input stream. -// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) -type Decoder struct { - iter *Iterator -} - -// Decode decode JSON into interface{} -func (adapter *Decoder) Decode(obj interface{}) error { - if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { - if !adapter.iter.loadMore() { - return io.EOF - } - } - adapter.iter.ReadVal(obj) - err := adapter.iter.Error - if err == io.EOF { - return nil - } - return adapter.iter.Error -} - -// More is there more? 
-func (adapter *Decoder) More() bool { - iter := adapter.iter - if iter.Error != nil { - return false - } - c := iter.nextToken() - if c == 0 { - return false - } - iter.unreadByte() - return c != ']' && c != '}' -} - -// Buffered remaining buffer -func (adapter *Decoder) Buffered() io.Reader { - remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] - return bytes.NewReader(remaining) -} - -// UseNumber causes the Decoder to unmarshal a number into an interface{} as a -// Number instead of as a float64. -func (adapter *Decoder) UseNumber() { - cfg := adapter.iter.cfg.configBeforeFrozen - cfg.UseNumber = true - adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) -} - -// DisallowUnknownFields causes the Decoder to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -func (adapter *Decoder) DisallowUnknownFields() { - cfg := adapter.iter.cfg.configBeforeFrozen - cfg.DisallowUnknownFields = true - adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) -} - -// NewEncoder same as json.NewEncoder -func NewEncoder(writer io.Writer) *Encoder { - return ConfigDefault.NewEncoder(writer) -} - -// Encoder same as json.Encoder -type Encoder struct { - stream *Stream -} - -// Encode encode interface{} as JSON to io.Writer -func (adapter *Encoder) Encode(val interface{}) error { - adapter.stream.WriteVal(val) - adapter.stream.WriteRaw("\n") - adapter.stream.Flush() - return adapter.stream.Error -} - -// SetIndent set the indention. Prefix is not supported -func (adapter *Encoder) SetIndent(prefix, indent string) { - config := adapter.stream.cfg.configBeforeFrozen - config.IndentionStep = len(indent) - adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) -} - -// SetEscapeHTML escape html by default, set to false to disable -func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { - config := adapter.stream.cfg.configBeforeFrozen - config.EscapeHTML = escapeHTML - adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) -} - -// Valid reports whether data is a valid JSON encoding. -func Valid(data []byte) bool { - return ConfigDefault.Valid(data) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any.go deleted file mode 100644 index f6b8aea..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any.go +++ /dev/null @@ -1,325 +0,0 @@ -package jsoniter - -import ( - "errors" - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "strconv" - "unsafe" -) - -// Any generic object representation. -// The lazy json implementation holds []byte and parse lazily. 
-type Any interface { - LastError() error - ValueType() ValueType - MustBeValid() Any - ToBool() bool - ToInt() int - ToInt32() int32 - ToInt64() int64 - ToUint() uint - ToUint32() uint32 - ToUint64() uint64 - ToFloat32() float32 - ToFloat64() float64 - ToString() string - ToVal(val interface{}) - Get(path ...interface{}) Any - Size() int - Keys() []string - GetInterface() interface{} - WriteTo(stream *Stream) -} - -type baseAny struct{} - -func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} -} - -func (any *baseAny) Size() int { - return 0 -} - -func (any *baseAny) Keys() []string { - return []string{} -} - -func (any *baseAny) ToVal(obj interface{}) { - panic("not implemented") -} - -// WrapInt32 turn int32 into Any interface -func WrapInt32(val int32) Any { - return &int32Any{baseAny{}, val} -} - -// WrapInt64 turn int64 into Any interface -func WrapInt64(val int64) Any { - return &int64Any{baseAny{}, val} -} - -// WrapUint32 turn uint32 into Any interface -func WrapUint32(val uint32) Any { - return &uint32Any{baseAny{}, val} -} - -// WrapUint64 turn uint64 into Any interface -func WrapUint64(val uint64) Any { - return &uint64Any{baseAny{}, val} -} - -// WrapFloat64 turn float64 into Any interface -func WrapFloat64(val float64) Any { - return &floatAny{baseAny{}, val} -} - -// WrapString turn string into Any interface -func WrapString(val string) Any { - return &stringAny{baseAny{}, val} -} - -// Wrap turn a go object into Any interface -func Wrap(val interface{}) Any { - if val == nil { - return &nilAny{} - } - asAny, isAny := val.(Any) - if isAny { - return asAny - } - typ := reflect2.TypeOf(val) - switch typ.Kind() { - case reflect.Slice: - return wrapArray(val) - case reflect.Struct: - return wrapStruct(val) - case reflect.Map: - return wrapMap(val) - case reflect.String: - return WrapString(val.(string)) - case reflect.Int: - if strconv.IntSize == 32 { - return WrapInt32(int32(val.(int))) - } - return WrapInt64(int64(val.(int))) - case reflect.Int8: - return WrapInt32(int32(val.(int8))) - case reflect.Int16: - return WrapInt32(int32(val.(int16))) - case reflect.Int32: - return WrapInt32(val.(int32)) - case reflect.Int64: - return WrapInt64(val.(int64)) - case reflect.Uint: - if strconv.IntSize == 32 { - return WrapUint32(uint32(val.(uint))) - } - return WrapUint64(uint64(val.(uint))) - case reflect.Uintptr: - if ptrSize == 32 { - return WrapUint32(uint32(val.(uintptr))) - } - return WrapUint64(uint64(val.(uintptr))) - case reflect.Uint8: - return WrapUint32(uint32(val.(uint8))) - case reflect.Uint16: - return WrapUint32(uint32(val.(uint16))) - case reflect.Uint32: - return WrapUint32(uint32(val.(uint32))) - case reflect.Uint64: - return WrapUint64(val.(uint64)) - case reflect.Float32: - return WrapFloat64(float64(val.(float32))) - case reflect.Float64: - return WrapFloat64(val.(float64)) - case reflect.Bool: - if val.(bool) == true { - return &trueAny{} - } - return &falseAny{} - } - return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} -} - -// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
-func (iter *Iterator) ReadAny() Any { - return iter.readAny() -} - -func (iter *Iterator) readAny() Any { - c := iter.nextToken() - switch c { - case '"': - iter.unreadByte() - return &stringAny{baseAny{}, iter.ReadString()} - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - return &nilAny{} - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - return &trueAny{} - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - return &falseAny{} - case '{': - return iter.readObjectAny() - case '[': - return iter.readArrayAny() - case '-': - return iter.readNumberAny(false) - case 0: - return &invalidAny{baseAny{}, errors.New("input is empty")} - default: - return iter.readNumberAny(true) - } -} - -func (iter *Iterator) readNumberAny(positive bool) Any { - iter.startCapture(iter.head - 1) - iter.skipNumber() - lazyBuf := iter.stopCapture() - return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readObjectAny() Any { - iter.startCapture(iter.head - 1) - iter.skipObject() - lazyBuf := iter.stopCapture() - return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readArrayAny() Any { - iter.startCapture(iter.head - 1) - iter.skipArray() - lazyBuf := iter.stopCapture() - return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func locateObjectField(iter *Iterator, target string) []byte { - var found []byte - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - if field == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - return true - }) - return found -} - -func locateArrayElement(iter *Iterator, target int) []byte { - var found []byte - n := 0 - iter.ReadArrayCB(func(iter *Iterator) bool { - if n == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - n++ - return true - }) - return found -} - -func locatePath(iter *Iterator, path []interface{}) Any { - for i, pathKeyObj := range path { - switch pathKey := pathKeyObj.(type) { - case string: - valueBytes := locateObjectField(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int: - valueBytes := locateArrayElement(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int32: - if '*' == pathKey { - return iter.readAny().Get(path[i:]...) 
- } - return newInvalidAny(path[i:]) - default: - return newInvalidAny(path[i:]) - } - } - if iter.Error != nil && iter.Error != io.EOF { - return &invalidAny{baseAny{}, iter.Error} - } - return iter.readAny() -} - -var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() - -func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ == anyType { - return &directAnyCodec{} - } - if typ.Implements(anyType) { - return &anyCodec{ - valType: typ, - } - } - return nil -} - -func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == anyType { - return &directAnyCodec{} - } - if typ.Implements(anyType) { - return &anyCodec{ - valType: typ, - } - } - return nil -} - -type anyCodec struct { - valType reflect2.Type -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - panic("not implemented") -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := codec.valType.UnsafeIndirect(ptr) - any := obj.(Any) - any.WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - obj := codec.valType.UnsafeIndirect(ptr) - any := obj.(Any) - return any.Size() == 0 -} - -type directAnyCodec struct { -} - -func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *(*Any)(ptr) = iter.readAny() -} - -func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - any := *(*Any)(ptr) - if any == nil { - stream.WriteNil() - return - } - any.WriteTo(stream) -} - -func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { - any := *(*Any)(ptr) - return any.Size() == 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_array.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_array.go deleted file mode 100644 index 0449e9a..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_array.go +++ /dev/null @@ -1,278 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type arrayLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *arrayLazyAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayLazyAny) MustBeValid() Any { - return any -} - -func (any *arrayLazyAny) LastError() error { - return any.err -} - -func (any *arrayLazyAny) ToBool() bool { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.ReadArray() -} - -func (any *arrayLazyAny) ToInt() int { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt32() int32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt64() int64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint() uint { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint32() uint32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint64() uint64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat32() float32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat64() float64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *arrayLazyAny) ToVal(val interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(val) -} - -func (any *arrayLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - 
case int: - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - valueBytes := locateArrayElement(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - arr := make([]Any, 0) - iter.ReadArrayCB(func(iter *Iterator) bool { - found := iter.readAny().Get(path[1:]...) - if found.ValueType() != InvalidValue { - arr = append(arr, found) - } - return true - }) - return wrapArray(arr) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadArrayCB(func(iter *Iterator) bool { - size++ - iter.Skip() - return true - }) - return size -} - -func (any *arrayLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *arrayLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type arrayAny struct { - baseAny - val reflect.Value -} - -func wrapArray(val interface{}) *arrayAny { - return &arrayAny{baseAny{}, reflect.ValueOf(val)} -} - -func (any *arrayAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayAny) MustBeValid() Any { - return any -} - -func (any *arrayAny) LastError() error { - return nil -} - -func (any *arrayAny) ToBool() bool { - return any.val.Len() != 0 -} - -func (any *arrayAny) ToInt() int { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt32() int32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt64() int64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint() uint { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint32() uint32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint64() uint64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat32() float32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat64() float64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToString() string { - str, _ := MarshalToString(any.val.Interface()) - return str -} - -func (any *arrayAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - if firstPath < 0 || firstPath >= any.val.Len() { - return newInvalidAny(path) - } - return Wrap(any.val.Index(firstPath).Interface()) - case int32: - if '*' == firstPath { - mappedAll := make([]Any, 0) - for i := 0; i < any.val.Len(); i++ { - mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll = append(mappedAll, mapped) - } - } - return wrapArray(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayAny) Size() int { - return any.val.Len() -} - -func (any *arrayAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *arrayAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_bool.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_bool.go deleted file mode 100644 index 9452324..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_bool.go +++ /dev/null @@ -1,137 +0,0 @@ -package jsoniter - -type trueAny struct { - baseAny -} - -func (any *trueAny) LastError() error { - return nil -} - -func (any *trueAny) ToBool() bool { - return true -} - -func (any *trueAny) ToInt() int { - return 1 -} - -func (any *trueAny) ToInt32() int32 { - return 1 -} - -func (any *trueAny) ToInt64() int64 { - return 1 -} - -func (any *trueAny) ToUint() uint { - return 1 -} - -func (any *trueAny) ToUint32() uint32 { - return 1 -} - -func (any *trueAny) ToUint64() uint64 { - return 1 -} - -func (any *trueAny) ToFloat32() float32 { - return 1 -} - -func (any *trueAny) ToFloat64() float64 { - return 1 -} - -func (any *trueAny) ToString() string { - return "true" -} - -func (any *trueAny) WriteTo(stream *Stream) { - stream.WriteTrue() -} - -func (any *trueAny) Parse() *Iterator { - return nil -} - -func (any *trueAny) GetInterface() interface{} { - return true -} - -func (any *trueAny) ValueType() ValueType { - return BoolValue -} - -func (any *trueAny) MustBeValid() Any { - return any -} - -type falseAny struct { - baseAny -} - -func (any *falseAny) LastError() error { - return nil -} - -func (any *falseAny) ToBool() bool { - return false -} - -func (any *falseAny) ToInt() int { - return 0 -} - -func (any *falseAny) ToInt32() int32 { - return 0 -} - -func (any *falseAny) ToInt64() int64 { - return 0 -} - -func (any *falseAny) ToUint() uint { - return 0 -} - -func (any *falseAny) ToUint32() uint32 { - return 0 -} - -func (any *falseAny) ToUint64() uint64 { - return 0 -} - -func (any *falseAny) ToFloat32() float32 { - return 0 -} - -func (any *falseAny) ToFloat64() float64 { - return 0 -} - -func (any *falseAny) ToString() string { - return "false" -} - -func (any *falseAny) WriteTo(stream *Stream) { - stream.WriteFalse() -} - -func (any *falseAny) Parse() *Iterator { - return nil -} - -func (any *falseAny) GetInterface() interface{} { - return false -} - -func (any *falseAny) ValueType() ValueType { - return BoolValue -} - -func (any *falseAny) MustBeValid() Any { - return any -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_float.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_float.go deleted file mode 100644 index 35fdb09..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_float.go +++ /dev/null @@ -1,83 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type floatAny struct { - baseAny - val float64 -} - -func (any *floatAny) Parse() *Iterator { - return nil -} - -func (any *floatAny) ValueType() ValueType { - return NumberValue -} - -func (any *floatAny) MustBeValid() Any { - return any -} - -func (any *floatAny) LastError() error { - return nil -} - -func (any *floatAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *floatAny) ToInt() int { - return int(any.val) -} - -func (any *floatAny) ToInt32() 
int32 { - return int32(any.val) -} - -func (any *floatAny) ToInt64() int64 { - return int64(any.val) -} - -func (any *floatAny) ToUint() uint { - if any.val > 0 { - return uint(any.val) - } - return 0 -} - -func (any *floatAny) ToUint32() uint32 { - if any.val > 0 { - return uint32(any.val) - } - return 0 -} - -func (any *floatAny) ToUint64() uint64 { - if any.val > 0 { - return uint64(any.val) - } - return 0 -} - -func (any *floatAny) ToFloat32() float32 { - return float32(any.val) -} - -func (any *floatAny) ToFloat64() float64 { - return any.val -} - -func (any *floatAny) ToString() string { - return strconv.FormatFloat(any.val, 'E', -1, 64) -} - -func (any *floatAny) WriteTo(stream *Stream) { - stream.WriteFloat64(any.val) -} - -func (any *floatAny) GetInterface() interface{} { - return any.val -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_int32.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_int32.go deleted file mode 100644 index 1b56f39..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_int32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int32Any struct { - baseAny - val int32 -} - -func (any *int32Any) LastError() error { - return nil -} - -func (any *int32Any) ValueType() ValueType { - return NumberValue -} - -func (any *int32Any) MustBeValid() Any { - return any -} - -func (any *int32Any) ToBool() bool { - return any.val != 0 -} - -func (any *int32Any) ToInt() int { - return int(any.val) -} - -func (any *int32Any) ToInt32() int32 { - return any.val -} - -func (any *int32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *int32Any) ToUint() uint { - return uint(any.val) -} - -func (any *int32Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *int32Any) WriteTo(stream *Stream) { - stream.WriteInt32(any.val) -} - -func (any *int32Any) Parse() *Iterator { - return nil -} - -func (any *int32Any) GetInterface() interface{} { - return any.val -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_int64.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_int64.go deleted file mode 100644 index c440d72..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_int64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int64Any struct { - baseAny - val int64 -} - -func (any *int64Any) LastError() error { - return nil -} - -func (any *int64Any) ValueType() ValueType { - return NumberValue -} - -func (any *int64Any) MustBeValid() Any { - return any -} - -func (any *int64Any) ToBool() bool { - return any.val != 0 -} - -func (any *int64Any) ToInt() int { - return int(any.val) -} - -func (any *int64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *int64Any) ToInt64() int64 { - return any.val -} - -func (any *int64Any) ToUint() uint { - return uint(any.val) -} - -func (any *int64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int64Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int64Any) 
ToString() string { - return strconv.FormatInt(any.val, 10) -} - -func (any *int64Any) WriteTo(stream *Stream) { - stream.WriteInt64(any.val) -} - -func (any *int64Any) Parse() *Iterator { - return nil -} - -func (any *int64Any) GetInterface() interface{} { - return any.val -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_invalid.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_invalid.go deleted file mode 100644 index 1d859ea..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_invalid.go +++ /dev/null @@ -1,82 +0,0 @@ -package jsoniter - -import "fmt" - -type invalidAny struct { - baseAny - err error -} - -func newInvalidAny(path []interface{}) *invalidAny { - return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} -} - -func (any *invalidAny) LastError() error { - return any.err -} - -func (any *invalidAny) ValueType() ValueType { - return InvalidValue -} - -func (any *invalidAny) MustBeValid() Any { - panic(any.err) -} - -func (any *invalidAny) ToBool() bool { - return false -} - -func (any *invalidAny) ToInt() int { - return 0 -} - -func (any *invalidAny) ToInt32() int32 { - return 0 -} - -func (any *invalidAny) ToInt64() int64 { - return 0 -} - -func (any *invalidAny) ToUint() uint { - return 0 -} - -func (any *invalidAny) ToUint32() uint32 { - return 0 -} - -func (any *invalidAny) ToUint64() uint64 { - return 0 -} - -func (any *invalidAny) ToFloat32() float32 { - return 0 -} - -func (any *invalidAny) ToFloat64() float64 { - return 0 -} - -func (any *invalidAny) ToString() string { - return "" -} - -func (any *invalidAny) WriteTo(stream *Stream) { -} - -func (any *invalidAny) Get(path ...interface{}) Any { - if any.err == nil { - return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} - } - return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} -} - -func (any *invalidAny) Parse() *Iterator { - return nil -} - -func (any *invalidAny) GetInterface() interface{} { - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_nil.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_nil.go deleted file mode 100644 index d04cb54..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_nil.go +++ /dev/null @@ -1,69 +0,0 @@ -package jsoniter - -type nilAny struct { - baseAny -} - -func (any *nilAny) LastError() error { - return nil -} - -func (any *nilAny) ValueType() ValueType { - return NilValue -} - -func (any *nilAny) MustBeValid() Any { - return any -} - -func (any *nilAny) ToBool() bool { - return false -} - -func (any *nilAny) ToInt() int { - return 0 -} - -func (any *nilAny) ToInt32() int32 { - return 0 -} - -func (any *nilAny) ToInt64() int64 { - return 0 -} - -func (any *nilAny) ToUint() uint { - return 0 -} - -func (any *nilAny) ToUint32() uint32 { - return 0 -} - -func (any *nilAny) ToUint64() uint64 { - return 0 -} - -func (any *nilAny) ToFloat32() float32 { - return 0 -} - -func (any *nilAny) ToFloat64() float64 { - return 0 -} - -func (any *nilAny) ToString() string { - return "" -} - -func (any *nilAny) WriteTo(stream *Stream) { - stream.WriteNil() -} - -func (any *nilAny) Parse() *Iterator { - return nil -} - -func (any *nilAny) GetInterface() interface{} { - return nil -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_number.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_number.go deleted file mode 100644 index 9d1e901..0000000 --- 
a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_number.go +++ /dev/null @@ -1,123 +0,0 @@ -package jsoniter - -import ( - "io" - "unsafe" -) - -type numberLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *numberLazyAny) ValueType() ValueType { - return NumberValue -} - -func (any *numberLazyAny) MustBeValid() Any { - return any -} - -func (any *numberLazyAny) LastError() error { - return any.err -} - -func (any *numberLazyAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *numberLazyAny) ToInt() int { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToInt32() int32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToInt64() int64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint() uint { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint32() uint32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint64() uint64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToFloat32() float32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToFloat64() float64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *numberLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *numberLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_object.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_object.go deleted file mode 100644 index c44ef5c..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_object.go +++ /dev/null @@ -1,374 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type objectLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *objectLazyAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectLazyAny) MustBeValid() Any { - return any -} - -func (any *objectLazyAny) LastError() error { - return any.err -} - -func (any *objectLazyAny) ToBool() bool { - return true -} - 
-func (any *objectLazyAny) ToInt() int { - return 0 -} - -func (any *objectLazyAny) ToInt32() int32 { - return 0 -} - -func (any *objectLazyAny) ToInt64() int64 { - return 0 -} - -func (any *objectLazyAny) ToUint() uint { - return 0 -} - -func (any *objectLazyAny) ToUint32() uint32 { - return 0 -} - -func (any *objectLazyAny) ToUint64() uint64 { - return 0 -} - -func (any *objectLazyAny) ToFloat32() float32 { - return 0 -} - -func (any *objectLazyAny) ToFloat64() float64 { - return 0 -} - -func (any *objectLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *objectLazyAny) ToVal(obj interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(obj) -} - -func (any *objectLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - valueBytes := locateObjectField(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - mapped := locatePath(iter, path[1:]) - if mapped.ValueType() != InvalidValue { - mappedAll[field] = mapped - } - return true - }) - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectLazyAny) Keys() []string { - keys := []string{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - iter.Skip() - keys = append(keys, field) - return true - }) - return keys -} - -func (any *objectLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - size++ - return true - }) - return size -} - -func (any *objectLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *objectLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type objectAny struct { - baseAny - err error - val reflect.Value -} - -func wrapStruct(val interface{}) *objectAny { - return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *objectAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectAny) MustBeValid() Any { - return any -} - -func (any *objectAny) Parse() *Iterator { - return nil -} - -func (any *objectAny) LastError() error { - return any.err -} - -func (any *objectAny) ToBool() bool { - return any.val.NumField() != 0 -} - -func (any *objectAny) ToInt() int { - return 0 -} - -func (any *objectAny) ToInt32() int32 { - return 0 -} - -func (any *objectAny) ToInt64() int64 { - return 0 -} - -func (any *objectAny) ToUint() uint { - return 0 -} - -func (any *objectAny) ToUint32() uint32 { - return 0 -} - -func (any *objectAny) ToUint64() uint64 { - return 0 -} - -func (any *objectAny) ToFloat32() float32 { - return 0 -} - -func (any *objectAny) ToFloat64() float64 { - return 0 -} - -func (any *objectAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *objectAny) Get(path ...interface{}) Any { - if 
len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - field := any.val.FieldByName(firstPath) - if !field.IsValid() { - return newInvalidAny(path) - } - return Wrap(field.Interface()) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for i := 0; i < any.val.NumField(); i++ { - field := any.val.Field(i) - if field.CanInterface() { - mapped := Wrap(field.Interface()).Get(path[1:]...) - if mapped.ValueType() != InvalidValue { - mappedAll[any.val.Type().Field(i).Name] = mapped - } - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectAny) Keys() []string { - keys := make([]string, 0, any.val.NumField()) - for i := 0; i < any.val.NumField(); i++ { - keys = append(keys, any.val.Type().Field(i).Name) - } - return keys -} - -func (any *objectAny) Size() int { - return any.val.NumField() -} - -func (any *objectAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *objectAny) GetInterface() interface{} { - return any.val.Interface() -} - -type mapAny struct { - baseAny - err error - val reflect.Value -} - -func wrapMap(val interface{}) *mapAny { - return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *mapAny) ValueType() ValueType { - return ObjectValue -} - -func (any *mapAny) MustBeValid() Any { - return any -} - -func (any *mapAny) Parse() *Iterator { - return nil -} - -func (any *mapAny) LastError() error { - return any.err -} - -func (any *mapAny) ToBool() bool { - return true -} - -func (any *mapAny) ToInt() int { - return 0 -} - -func (any *mapAny) ToInt32() int32 { - return 0 -} - -func (any *mapAny) ToInt64() int64 { - return 0 -} - -func (any *mapAny) ToUint() uint { - return 0 -} - -func (any *mapAny) ToUint32() uint32 { - return 0 -} - -func (any *mapAny) ToUint64() uint64 { - return 0 -} - -func (any *mapAny) ToFloat32() float32 { - return 0 -} - -func (any *mapAny) ToFloat64() float64 { - return 0 -} - -func (any *mapAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *mapAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for _, key := range any.val.MapKeys() { - keyAsStr := key.String() - element := Wrap(any.val.MapIndex(key).Interface()) - mapped := element.Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll[keyAsStr] = mapped - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - value := any.val.MapIndex(reflect.ValueOf(firstPath)) - if !value.IsValid() { - return newInvalidAny(path) - } - return Wrap(value.Interface()) - } -} - -func (any *mapAny) Keys() []string { - keys := make([]string, 0, any.val.Len()) - for _, key := range any.val.MapKeys() { - keys = append(keys, key.String()) - } - return keys -} - -func (any *mapAny) Size() int { - return any.val.Len() -} - -func (any *mapAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *mapAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_str.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_str.go deleted file mode 100644 index a4b93c7..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_str.go +++ /dev/null @@ -1,166 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strconv" -) - -type stringAny struct { - baseAny - val string -} - -func (any *stringAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} -} - -func (any *stringAny) Parse() *Iterator { - return nil -} - -func (any *stringAny) ValueType() ValueType { - return StringValue -} - -func (any *stringAny) MustBeValid() Any { - return any -} - -func (any *stringAny) LastError() error { - return nil -} - -func (any *stringAny) ToBool() bool { - str := any.ToString() - if str == "0" { - return false - } - for _, c := range str { - switch c { - case ' ', '\n', '\r', '\t': - default: - return true - } - } - return false -} - -func (any *stringAny) ToInt() int { - return int(any.ToInt64()) - -} - -func (any *stringAny) ToInt32() int32 { - return int32(any.ToInt64()) -} - -func (any *stringAny) ToInt64() int64 { - if any.val == "" { - return 0 - } - - flag := 1 - startPos := 0 - endPos := 0 - if any.val[0] == '+' || any.val[0] == '-' { - startPos = 1 - } - - if any.val[0] == '-' { - flag = -1 - } - - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) - return int64(flag) * parsed -} - -func (any *stringAny) ToUint() uint { - return uint(any.ToUint64()) -} - -func (any *stringAny) ToUint32() uint32 { - return uint32(any.ToUint64()) -} - -func (any *stringAny) ToUint64() uint64 { - if any.val == "" { - return 0 - } - - startPos := 0 - endPos := 0 - - if any.val[0] == '-' { - return 0 - } - if any.val[0] == '+' { - startPos = 1 - } - - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) - return parsed -} - -func (any *stringAny) ToFloat32() float32 { - return float32(any.ToFloat64()) -} - -func (any *stringAny) ToFloat64() float64 { - if len(any.val) == 0 { - return 0 - } - - // first char invalid - if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { - return 0 - } - - // extract valid num expression from string - // eg 123true => 123, -12.12xxa => -12.12 - endPos := 1 - for i := 1; i < len(any.val); i++ { - if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { - endPos = i + 1 - continue - } - - // end position is the first char which is not digit - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - endPos = i - break - } - } - parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) - return parsed -} - -func (any *stringAny) ToString() string { - return any.val -} - -func (any *stringAny) WriteTo(stream *Stream) { - stream.WriteString(any.val) -} - -func (any *stringAny) GetInterface() interface{} { - return any.val -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_uint32.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_uint32.go deleted file mode 100644 index 656bbd3..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_uint32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint32Any struct { - baseAny - val uint32 -} - -func (any *uint32Any) LastError() error { - return nil -} - -func (any *uint32Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint32Any) MustBeValid() Any { - return any -} - -func (any *uint32Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint32Any) ToInt() int { - return int(any.val) -} - -func (any *uint32Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint32Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint32Any) ToUint32() uint32 { - return any.val -} - -func (any *uint32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *uint32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *uint32Any) WriteTo(stream *Stream) { - stream.WriteUint32(any.val) -} - -func (any *uint32Any) Parse() *Iterator { - return nil -} - -func (any *uint32Any) GetInterface() interface{} { - return any.val -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_uint64.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_uint64.go deleted file mode 100644 index 7df2fce..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/any_uint64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint64Any struct { - baseAny - val uint64 -} - -func (any *uint64Any) LastError() error { - return nil -} - -func (any *uint64Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint64Any) MustBeValid() Any { - return any -} - -func (any *uint64Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint64Any) ToInt() int { - return int(any.val) -} - -func (any *uint64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint64Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint64Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *uint64Any) ToUint64() uint64 { - return any.val -} - -func (any *uint64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint64Any) ToString() string { - return strconv.FormatUint(any.val, 10) -} - -func (any *uint64Any) WriteTo(stream *Stream) { - stream.WriteUint64(any.val) -} - -func (any *uint64Any) Parse() *Iterator { - return 
nil -} - -func (any *uint64Any) GetInterface() interface{} { - return any.val -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/build.sh b/cmd/bpa-operator/vendor/github.com/json-iterator/go/build.sh deleted file mode 100644 index b45ef68..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/build.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e -set -x - -if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then - mkdir -p /tmp/build-golang/src/github.com/json-iterator - ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go -fi -export GOPATH=/tmp/build-golang -go get -u github.com/golang/dep/cmd/dep -cd /tmp/build-golang/src/github.com/json-iterator/go -exec $GOPATH/bin/dep ensure -update diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/config.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/config.go deleted file mode 100644 index 8c58fcb..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/config.go +++ /dev/null @@ -1,375 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "io" - "reflect" - "sync" - "unsafe" - - "github.com/modern-go/concurrent" - "github.com/modern-go/reflect2" -) - -// Config customize how the API should behave. -// The API is created from Config by Froze. -type Config struct { - IndentionStep int - MarshalFloatWith6Digits bool - EscapeHTML bool - SortMapKeys bool - UseNumber bool - DisallowUnknownFields bool - TagKey string - OnlyTaggedField bool - ValidateJsonRawMessage bool - ObjectFieldMustBeSimpleString bool - CaseSensitive bool -} - -// API the public interface of this package. -// Primary Marshal and Unmarshal. -type API interface { - IteratorPool - StreamPool - MarshalToString(v interface{}) (string, error) - Marshal(v interface{}) ([]byte, error) - MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) - UnmarshalFromString(str string, v interface{}) error - Unmarshal(data []byte, v interface{}) error - Get(data []byte, path ...interface{}) Any - NewEncoder(writer io.Writer) *Encoder - NewDecoder(reader io.Reader) *Decoder - Valid(data []byte) bool - RegisterExtension(extension Extension) - DecoderOf(typ reflect2.Type) ValDecoder - EncoderOf(typ reflect2.Type) ValEncoder -} - -// ConfigDefault the default API -var ConfigDefault = Config{ - EscapeHTML: true, -}.Froze() - -// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior -var ConfigCompatibleWithStandardLibrary = Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, -}.Froze() - -// ConfigFastest marshals float with only 6 digits precision -var ConfigFastest = Config{ - EscapeHTML: false, - MarshalFloatWith6Digits: true, // will lose precession - ObjectFieldMustBeSimpleString: true, // do not unescape object field -}.Froze() - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - objectFieldMustBeSimpleString bool - onlyTaggedField bool - disallowUnknownFields bool - decoderCache *concurrent.Map - encoderCache *concurrent.Map - encoderExtension Extension - decoderExtension Extension - extraExtensions []Extension - streamPool *sync.Pool - iteratorPool *sync.Pool - caseSensitive bool -} - -func (cfg *frozenConfig) initCache() { - cfg.decoderCache = concurrent.NewMap() - cfg.encoderCache = concurrent.NewMap() -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { - cfg.decoderCache.Store(cacheKey, decoder) -} - -func (cfg *frozenConfig) 
addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { - cfg.encoderCache.Store(cacheKey, encoder) -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { - decoder, found := cfg.decoderCache.Load(cacheKey) - if found { - return decoder.(ValDecoder) - } - return nil -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { - encoder, found := cfg.encoderCache.Load(cacheKey) - if found { - return encoder.(ValEncoder) - } - return nil -} - -var cfgCache = concurrent.NewMap() - -func getFrozenConfigFromCache(cfg Config) *frozenConfig { - obj, found := cfgCache.Load(cfg) - if found { - return obj.(*frozenConfig) - } - return nil -} - -func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { - cfgCache.Store(cfg, frozenConfig) -} - -// Froze forge API from config -func (cfg Config) Froze() API { - api := &frozenConfig{ - sortMapKeys: cfg.SortMapKeys, - indentionStep: cfg.IndentionStep, - objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, - onlyTaggedField: cfg.OnlyTaggedField, - disallowUnknownFields: cfg.DisallowUnknownFields, - caseSensitive: cfg.CaseSensitive, - } - api.streamPool = &sync.Pool{ - New: func() interface{} { - return NewStream(api, nil, 512) - }, - } - api.iteratorPool = &sync.Pool{ - New: func() interface{} { - return NewIterator(api) - }, - } - api.initCache() - encoderExtension := EncoderExtension{} - decoderExtension := DecoderExtension{} - if cfg.MarshalFloatWith6Digits { - api.marshalFloatWith6Digits(encoderExtension) - } - if cfg.EscapeHTML { - api.escapeHTML(encoderExtension) - } - if cfg.UseNumber { - api.useNumber(decoderExtension) - } - if cfg.ValidateJsonRawMessage { - api.validateJsonRawMessage(encoderExtension) - } - api.encoderExtension = encoderExtension - api.decoderExtension = decoderExtension - api.configBeforeFrozen = cfg - return api -} - -func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { - api := getFrozenConfigFromCache(cfg) - if api != nil { - return api - } - api = cfg.Froze().(*frozenConfig) - for _, extension := range extraExtensions { - api.RegisterExtension(extension) - } - addFrozenConfigToCache(cfg, api) - return api -} - -func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { - encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { - rawMessage := *(*json.RawMessage)(ptr) - iter := cfg.BorrowIterator([]byte(rawMessage)) - iter.Read() - if iter.Error != nil { - stream.WriteRaw("null") - } else { - cfg.ReturnIterator(iter) - stream.WriteRaw(string(rawMessage)) - } - }, func(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 - }} - extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder - extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder -} - -func (cfg *frozenConfig) useNumber(extension DecoderExtension) { - extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { - exitingValue := *((*interface{})(ptr)) - if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { - iter.ReadVal(exitingValue) - return - } - if iter.WhatIsNext() == NumberValue { - *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) - } else { - *((*interface{})(ptr)) = iter.Read() - } - }} -} -func (cfg *frozenConfig) getTagKey() string { - tagKey := cfg.configBeforeFrozen.TagKey - if tagKey == "" { - return "json" - } - return tagKey -} - -func (cfg *frozenConfig) RegisterExtension(extension 
Extension) { - cfg.extraExtensions = append(cfg.extraExtensions, extension) - copied := cfg.configBeforeFrozen - cfg.configBeforeFrozen = copied -} - -type lossyFloat32Encoder struct { -} - -func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32Lossy(*((*float32)(ptr))) -} - -func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type lossyFloat64Encoder struct { -} - -func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64Lossy(*((*float64)(ptr))) -} - -func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -// EnableLossyFloatMarshalling keeps 10**(-6) precision -// for float variables for better performance. -func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { - // for better performance - extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} - extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} -} - -type htmlEscapedStringEncoder struct { -} - -func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteStringWithHTMLEscaped(str) -} - -func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { - encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} -} - -func (cfg *frozenConfig) cleanDecoders() { - typeDecoders = map[string]ValDecoder{} - fieldDecoders = map[string]ValDecoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) cleanEncoders() { - typeEncoders = map[string]ValEncoder{} - fieldEncoders = map[string]ValEncoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return "", stream.Error - } - return string(stream.Buffer()), nil -} - -func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return nil, stream.Error - } - result := stream.Buffer() - copied := make([]byte, len(result)) - copy(copied, result) - return copied, nil -} - -func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - if prefix != "" { - panic("prefix is not supported") - } - for _, r := range indent { - if r != ' ' { - panic("indent can only be space") - } - } - newCfg := cfg.configBeforeFrozen - newCfg.IndentionStep = len(indent) - return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) -} - -func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { - data := []byte(str) - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - c := iter.nextToken() - if c == 0 { - if iter.Error == io.EOF { - return nil - } - return iter.Error - } - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - return iter.Error -} - -func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - return locatePath(iter, path) -} - -func (cfg *frozenConfig) 
Unmarshal(data []byte, v interface{}) error { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - c := iter.nextToken() - if c == 0 { - if iter.Error == io.EOF { - return nil - } - return iter.Error - } - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - return iter.Error -} - -func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { - stream := NewStream(cfg, writer, 512) - return &Encoder{stream} -} - -func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { - iter := Parse(cfg, reader, 512) - return &Decoder{iter} -} - -func (cfg *frozenConfig) Valid(data []byte) bool { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.Skip() - return iter.Error == nil -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/cmd/bpa-operator/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md deleted file mode 100644 index 3095662..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md +++ /dev/null @@ -1,7 +0,0 @@ -| json type \ dest type | bool | int | uint | float |string| -| --- | --- | --- | --- |--|--| -| number | positive => true
negative => true<br/>zero => false| 23.2 => 23<br/>-32.1 => -32| 12.1 => 12<br/>-12.1 => 0|as normal|same as origin|
-| string | empty string => false<br/>string "0" => false<br/>other strings => true | "123.32" => 123<br/>"-123.4" => -123<br/>"123.23xxxw" => 123<br/>"abcde12" => 0<br/>"-32.1" => -32| 13.2 => 13<br/>-1.1 => 0 |12.1 => 12.1<br/>-12.3 => -12.3<br/>12.4xxa => 12.4<br/>+1.1e2 =>110 |same as origin|
-| bool | true => true<br/>false => false| true => 1<br/>false => 0 | true => 1<br/>false => 0 |true => 1<br/>false => 0|true => "true"<br/>false => "false"|
-| object | true | 0 | 0 |0|original json|
-| array | empty array => false<br/>nonempty array => true| [] => 0<br/>[1,2] => 1 | [] => 0<br/>[1,2] => 1 |[] => 0<br/>
[1,2] => 1|original json| \ No newline at end of file diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter.go deleted file mode 100644 index 95ae54f..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter.go +++ /dev/null @@ -1,322 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "fmt" - "io" -) - -// ValueType the type for JSON element -type ValueType int - -const ( - // InvalidValue invalid JSON element - InvalidValue ValueType = iota - // StringValue JSON element "string" - StringValue - // NumberValue JSON element 100 or 0.10 - NumberValue - // NilValue JSON element null - NilValue - // BoolValue JSON element true or false - BoolValue - // ArrayValue JSON element [] - ArrayValue - // ObjectValue JSON element {} - ObjectValue -) - -var hexDigits []byte -var valueTypes []ValueType - -func init() { - hexDigits = make([]byte, 256) - for i := 0; i < len(hexDigits); i++ { - hexDigits[i] = 255 - } - for i := '0'; i <= '9'; i++ { - hexDigits[i] = byte(i - '0') - } - for i := 'a'; i <= 'f'; i++ { - hexDigits[i] = byte((i - 'a') + 10) - } - for i := 'A'; i <= 'F'; i++ { - hexDigits[i] = byte((i - 'A') + 10) - } - valueTypes = make([]ValueType, 256) - for i := 0; i < len(valueTypes); i++ { - valueTypes[i] = InvalidValue - } - valueTypes['"'] = StringValue - valueTypes['-'] = NumberValue - valueTypes['0'] = NumberValue - valueTypes['1'] = NumberValue - valueTypes['2'] = NumberValue - valueTypes['3'] = NumberValue - valueTypes['4'] = NumberValue - valueTypes['5'] = NumberValue - valueTypes['6'] = NumberValue - valueTypes['7'] = NumberValue - valueTypes['8'] = NumberValue - valueTypes['9'] = NumberValue - valueTypes['t'] = BoolValue - valueTypes['f'] = BoolValue - valueTypes['n'] = NilValue - valueTypes['['] = ArrayValue - valueTypes['{'] = ObjectValue -} - -// Iterator is a io.Reader like object, with JSON specific read functions. -// Error is not returned as return value, but stored as Error member on this iterator instance. 
-type Iterator struct { - cfg *frozenConfig - reader io.Reader - buf []byte - head int - tail int - captureStartedAt int - captured []byte - Error error - Attachment interface{} // open for customized decoder -} - -// NewIterator creates an empty Iterator instance -func NewIterator(cfg API) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: nil, - head: 0, - tail: 0, - } -} - -// Parse creates an Iterator instance from io.Reader -func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: reader, - buf: make([]byte, bufSize), - head: 0, - tail: 0, - } -} - -// ParseBytes creates an Iterator instance from byte array -func ParseBytes(cfg API, input []byte) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: input, - head: 0, - tail: len(input), - } -} - -// ParseString creates an Iterator instance from string -func ParseString(cfg API, input string) *Iterator { - return ParseBytes(cfg, []byte(input)) -} - -// Pool returns a pool can provide more iterator with same configuration -func (iter *Iterator) Pool() IteratorPool { - return iter.cfg -} - -// Reset reuse iterator instance by specifying another reader -func (iter *Iterator) Reset(reader io.Reader) *Iterator { - iter.reader = reader - iter.head = 0 - iter.tail = 0 - return iter -} - -// ResetBytes reuse iterator instance by specifying another byte array as input -func (iter *Iterator) ResetBytes(input []byte) *Iterator { - iter.reader = nil - iter.buf = input - iter.head = 0 - iter.tail = len(input) - return iter -} - -// WhatIsNext gets ValueType of relatively next json element -func (iter *Iterator) WhatIsNext() ValueType { - valueType := valueTypes[iter.nextToken()] - iter.unreadByte() - return valueType -} - -func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i - return false - } - return true -} - -func (iter *Iterator) isObjectEnd() bool { - c := iter.nextToken() - if c == ',' { - return false - } - if c == '}' { - return true - } - iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) - return true -} - -func (iter *Iterator) nextToken() byte { - // a variation of skip whitespaces, returning the next non-whitespace token - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i + 1 - return c - } - if !iter.loadMore() { - return 0 - } - } -} - -// ReportError record a error in iterator instance with current position. 
-func (iter *Iterator) ReportError(operation string, msg string) { - if iter.Error != nil { - if iter.Error != io.EOF { - return - } - } - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - peekEnd := iter.head + 10 - if peekEnd > iter.tail { - peekEnd = iter.tail - } - parsing := string(iter.buf[peekStart:peekEnd]) - contextStart := iter.head - 50 - if contextStart < 0 { - contextStart = 0 - } - contextEnd := iter.head + 50 - if contextEnd > iter.tail { - contextEnd = iter.tail - } - context := string(iter.buf[contextStart:contextEnd]) - iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", - operation, msg, iter.head-peekStart, parsing, context) -} - -// CurrentBuffer gets current buffer as string for debugging purpose -func (iter *Iterator) CurrentBuffer() string { - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, - string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) -} - -func (iter *Iterator) readByte() (ret byte) { - if iter.head == iter.tail { - if iter.loadMore() { - ret = iter.buf[iter.head] - iter.head++ - return ret - } - return 0 - } - ret = iter.buf[iter.head] - iter.head++ - return ret -} - -func (iter *Iterator) loadMore() bool { - if iter.reader == nil { - if iter.Error == nil { - iter.head = iter.tail - iter.Error = io.EOF - } - return false - } - if iter.captured != nil { - iter.captured = append(iter.captured, - iter.buf[iter.captureStartedAt:iter.tail]...) - iter.captureStartedAt = 0 - } - for { - n, err := iter.reader.Read(iter.buf) - if n == 0 { - if err != nil { - if iter.Error == nil { - iter.Error = err - } - return false - } - } else { - iter.head = 0 - iter.tail = n - return true - } - } -} - -func (iter *Iterator) unreadByte() { - if iter.Error != nil { - return - } - iter.head-- - return -} - -// Read read the next JSON element as generic interface{}. -func (iter *Iterator) Read() interface{} { - valueType := iter.WhatIsNext() - switch valueType { - case StringValue: - return iter.ReadString() - case NumberValue: - if iter.cfg.configBeforeFrozen.UseNumber { - return json.Number(iter.readNumberAsString()) - } - return iter.ReadFloat64() - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - return nil - case BoolValue: - return iter.ReadBool() - case ArrayValue: - arr := []interface{}{} - iter.ReadArrayCB(func(iter *Iterator) bool { - var elem interface{} - iter.ReadVal(&elem) - arr = append(arr, elem) - return true - }) - return arr - case ObjectValue: - obj := map[string]interface{}{} - iter.ReadMapCB(func(Iter *Iterator, field string) bool { - var elem interface{} - iter.ReadVal(&elem) - obj[field] = elem - return true - }) - return obj - default: - iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) - return nil - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_array.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_array.go deleted file mode 100644 index 6188cb4..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_array.go +++ /dev/null @@ -1,58 +0,0 @@ -package jsoniter - -// ReadArray read array element, tells if the array has more element to read. 
-func (iter *Iterator) ReadArray() (ret bool) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return false // null - case '[': - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - return true - } - return false - case ']': - return false - case ',': - return true - default: - iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) - return - } -} - -// ReadArrayCB read array with callback -func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { - c := iter.nextToken() - if c == '[' { - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - if !callback(iter) { - return false - } - c = iter.nextToken() - for c == ',' { - if !callback(iter) { - return false - } - c = iter.nextToken() - } - if c != ']' { - iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) - return false - } - return true - } - return true - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) - return false -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_float.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_float.go deleted file mode 100644 index b975463..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_float.go +++ /dev/null @@ -1,339 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "io" - "math/big" - "strconv" - "strings" - "unsafe" -) - -var floatDigits []int8 - -const invalidCharForNumber = int8(-1) -const endOfNumber = int8(-2) -const dotInNumber = int8(-3) - -func init() { - floatDigits = make([]int8, 256) - for i := 0; i < len(floatDigits); i++ { - floatDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - floatDigits[i] = i - int8('0') - } - floatDigits[','] = endOfNumber - floatDigits[']'] = endOfNumber - floatDigits['}'] = endOfNumber - floatDigits[' '] = endOfNumber - floatDigits['\t'] = endOfNumber - floatDigits['\n'] = endOfNumber - floatDigits['.'] = dotInNumber -} - -// ReadBigFloat read big.Float -func (iter *Iterator) ReadBigFloat() (ret *big.Float) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - prec := 64 - if len(str) > prec { - prec = len(str) - } - val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) - if err != nil { - iter.Error = err - return nil - } - return val -} - -// ReadBigInt read big.Int -func (iter *Iterator) ReadBigInt() (ret *big.Int) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - ret = big.NewInt(0) - var success bool - ret, success = ret.SetString(str, 10) - if !success { - iter.ReportError("ReadBigInt", "invalid big int") - return nil - } - return ret -} - -//ReadFloat32 read float32 -func (iter *Iterator) ReadFloat32() (ret float32) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat32() - } - iter.unreadByte() - return iter.readPositiveFloat32() -} - -func (iter *Iterator) readPositiveFloat32() (ret float32) { - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c := iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.ReportError("readFloat32", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat32", "leading dot is invalid") - return - 
case 0: - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat32", "leading zero is invalid") - return - } - } - value := uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.head = i - return float32(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat32SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float32(float64(value) / float64(pow10[decimalPlaces])) - } - // too many decimal places - return iter.readFloat32SlowPath() - case invalidCharForNumber, dotInNumber: - return iter.readFloat32SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat32SlowPath() -} - -func (iter *Iterator) readNumberAsString() (ret string) { - strBuf := [16]byte{} - str := strBuf[0:0] -load_loop: - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - str = append(str, c) - continue - default: - iter.head = i - break load_loop - } - } - if !iter.loadMore() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - return - } - if len(str) == 0 { - iter.ReportError("readNumberAsString", "invalid number") - } - return *(*string)(unsafe.Pointer(&str)) -} - -func (iter *Iterator) readFloat32SlowPath() (ret float32) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat32SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 32) - if err != nil { - iter.Error = err - return - } - return float32(val) -} - -// ReadFloat64 read float64 -func (iter *Iterator) ReadFloat64() (ret float64) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat64() - } - iter.unreadByte() - return iter.readPositiveFloat64() -} - -func (iter *Iterator) readPositiveFloat64() (ret float64) { - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c := iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.ReportError("readFloat64", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat64", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat64", "leading zero is invalid") - return - } - } - value := uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return 
iter.readFloat64SlowPath() - case endOfNumber: - iter.head = i - return float64(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat64SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float64(value) / float64(pow10[decimalPlaces]) - } - // too many decimal places - return iter.readFloat64SlowPath() - case invalidCharForNumber, dotInNumber: - return iter.readFloat64SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat64SlowPath() -} - -func (iter *Iterator) readFloat64SlowPath() (ret float64) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat64SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 64) - if err != nil { - iter.Error = err - return - } - return val -} - -func validateFloat(str string) string { - // strconv.ParseFloat is not validating `1.` or `1.e1` - if len(str) == 0 { - return "empty number" - } - if str[0] == '-' { - return "-- is not valid" - } - dotPos := strings.IndexByte(str, '.') - if dotPos != -1 { - if dotPos == len(str)-1 { - return "dot can not be last character" - } - switch str[dotPos+1] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - return "missing digit after dot" - } - } - return "" -} - -// ReadNumber read json.Number -func (iter *Iterator) ReadNumber() (ret json.Number) { - return json.Number(iter.readNumberAsString()) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_int.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_int.go deleted file mode 100644 index 2142320..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_int.go +++ /dev/null @@ -1,345 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var intDigits []int8 - -const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 -const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 - -func init() { - intDigits = make([]int8, 256) - for i := 0; i < len(intDigits); i++ { - intDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - intDigits[i] = i - int8('0') - } -} - -// ReadUint read uint -func (iter *Iterator) ReadUint() uint { - if strconv.IntSize == 32 { - return uint(iter.ReadUint32()) - } - return uint(iter.ReadUint64()) -} - -// ReadInt read int -func (iter *Iterator) ReadInt() int { - if strconv.IntSize == 32 { - return int(iter.ReadInt32()) - } - return int(iter.ReadInt64()) -} - -// ReadInt8 read int8 -func (iter *Iterator) ReadInt8() (ret int8) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt8+1 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int8(val) - } - val := iter.readUint32(c) - if val > math.MaxInt8 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int8(val) -} - -// ReadUint8 read uint8 -func (iter 
*Iterator) ReadUint8() (ret uint8) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint8 { - iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint8(val) -} - -// ReadInt16 read int16 -func (iter *Iterator) ReadInt16() (ret int16) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt16+1 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int16(val) - } - val := iter.readUint32(c) - if val > math.MaxInt16 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int16(val) -} - -// ReadUint16 read uint16 -func (iter *Iterator) ReadUint16() (ret uint16) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint16 { - iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint16(val) -} - -// ReadInt32 read int32 -func (iter *Iterator) ReadInt32() (ret int32) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt32+1 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int32(val) - } - val := iter.readUint32(c) - if val > math.MaxInt32 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int32(val) -} - -// ReadUint32 read uint32 -func (iter *Iterator) ReadUint32() (ret uint32) { - return iter.readUint32(iter.nextToken()) -} - -func (iter *Iterator) readUint32(c byte) (ret uint32) { - ind := intDigits[c] - if ind == 0 { - iter.assertInteger() - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint32(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10 + uint32(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100 + uint32(ind2)*10 + uint32(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) - 
iter.head = i - if ind9 == invalidCharForNumber { - iter.assertInteger() - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - if value > uint32SafeToMultiply10 { - value2 := (value << 3) + (value << 1) + uint32(ind) - if value2 < value { - iter.ReportError("readUint32", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint32(ind) - } - if !iter.loadMore() { - iter.assertInteger() - return value - } - } -} - -// ReadInt64 read int64 -func (iter *Iterator) ReadInt64() (ret int64) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint64(iter.readByte()) - if val > math.MaxInt64+1 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return -int64(val) - } - val := iter.readUint64(c) - if val > math.MaxInt64 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return int64(val) -} - -// ReadUint64 read uint64 -func (iter *Iterator) ReadUint64() uint64 { - return iter.readUint64(iter.nextToken()) -} - -func (iter *Iterator) readUint64(c byte) (ret uint64) { - ind := intDigits[c] - if ind == 0 { - iter.assertInteger() - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint64(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10 + uint64(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100 + uint64(ind2)*10 + uint64(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - iter.assertInteger() - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - if value > uint64SafeToMultiple10 { - value2 := (value << 3) + (value << 1) + uint64(ind) - if 
value2 < value { - iter.ReportError("readUint64", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint64(ind) - } - if !iter.loadMore() { - iter.assertInteger() - return value - } - } -} - -func (iter *Iterator) assertInteger() { - if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { - iter.ReportError("assertInteger", "can not decode float as int") - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_object.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_object.go deleted file mode 100644 index 1c57576..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_object.go +++ /dev/null @@ -1,251 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strings" -) - -// ReadObject read one field from object. -// If object ended, returns empty string. -// Otherwise, returns the field name. -func (iter *Iterator) ReadObject() (ret string) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return "" // null - case '{': - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field - } - if c == '}' { - return "" // end of object - } - iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) - return - case ',': - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field - case '}': - return "" // end of object - default: - iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) - return - } -} - -// CaseInsensitive -func (iter *Iterator) readFieldHash() int64 { - hash := int64(0x811c9dc5) - c := iter.nextToken() - if c != '"' { - iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) - return 0 - } - for { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - b := iter.buf[i] - if b == '\\' { - iter.head = i - for _, b := range iter.readStringSlowPath() { - if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return hash - } - if b == '"' { - iter.head = i + 1 - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return hash - } - if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - if !iter.loadMore() { - iter.ReportError("readFieldHash", `incomplete field name`) - return 0 - } - } -} - -func calcHash(str string, caseSensitive bool) int64 { - if !caseSensitive { - str = strings.ToLower(str) - } - hash := int64(0x811c9dc5) - for _, b := range []byte(str) { - hash ^= int64(b) - hash *= 0x1000193 - } - return int64(hash) -} - -// ReadObjectCB read object with callback, the key is ascii only and field name not copied -func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - var field string - if c == '{' { - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - 
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadObjectCB", `object not ended with }`) - return false - } - return true - } - if c == '}' { - return true - } - iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -// ReadMapCB read map with callback, the key can be any string -func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return false - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return false - } - if !callback(iter, field) { - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadMapCB", `object not ended with }`) - return false - } - return true - } - if c == '}' { - return true - } - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectStart() bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '}' { - return false - } - iter.unreadByte() - return true - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return false - } - iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { - str := iter.ReadStringAsSlice() - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if iter.buf[iter.head] != ':' { - iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) - return - } - iter.head++ - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if ret == nil { - return str - } - return ret -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip.go deleted file mode 100644 index f58beb9..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip.go +++ /dev/null @@ -1,129 +0,0 @@ -package jsoniter - -import "fmt" - -// ReadNil reads a json object as nil and -// returns whether it's a nil or not -func (iter *Iterator) ReadNil() (ret bool) { - c := iter.nextToken() - if c == 'n' { - 
iter.skipThreeBytes('u', 'l', 'l') // null - return true - } - iter.unreadByte() - return false -} - -// ReadBool reads a json object as BoolValue -func (iter *Iterator) ReadBool() (ret bool) { - c := iter.nextToken() - if c == 't' { - iter.skipThreeBytes('r', 'u', 'e') - return true - } - if c == 'f' { - iter.skipFourBytes('a', 'l', 's', 'e') - return false - } - iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) - return -} - -// SkipAndReturnBytes skip next JSON element, and return its content as []byte. -// The []byte can be kept, it is a copy of data. -func (iter *Iterator) SkipAndReturnBytes() []byte { - iter.startCapture(iter.head) - iter.Skip() - return iter.stopCapture() -} - -type captureBuffer struct { - startedAt int - captured []byte -} - -func (iter *Iterator) startCapture(captureStartedAt int) { - if iter.captured != nil { - panic("already in capture mode") - } - iter.captureStartedAt = captureStartedAt - iter.captured = make([]byte, 0, 32) -} - -func (iter *Iterator) stopCapture() []byte { - if iter.captured == nil { - panic("not in capture mode") - } - captured := iter.captured - remaining := iter.buf[iter.captureStartedAt:iter.head] - iter.captureStartedAt = -1 - iter.captured = nil - if len(captured) == 0 { - copied := make([]byte, len(remaining)) - copy(copied, remaining) - return copied - } - captured = append(captured, remaining...) - return captured -} - -// Skip skips a json object and positions to relatively the next json object -func (iter *Iterator) Skip() { - c := iter.nextToken() - switch c { - case '"': - iter.skipString() - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - case '0': - iter.unreadByte() - iter.ReadFloat32() - case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.skipNumber() - case '[': - iter.skipArray() - case '{': - iter.skipObject() - default: - iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) - return - } -} - -func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b4 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } -} - -func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip_sloppy.go deleted file mode 100644 index 8fcdc3b..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ /dev/null @@ -1,144 +0,0 @@ -//+build jsoniter_sloppy - -package jsoniter - -// sloppy but faster implementation, do not 
validate the input json - -func (iter *Iterator) skipNumber() { - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\r', '\t', ',', '}', ']': - iter.head = i - return - } - } - if !iter.loadMore() { - return - } - } -} - -func (iter *Iterator) skipArray() { - level := 1 - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '[': // If open symbol, increase level - level++ - case ']': // If close symbol, increase level - level-- - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete array") - return - } - } -} - -func (iter *Iterator) skipObject() { - level := 1 - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '{': // If open symbol, increase level - level++ - case '}': // If close symbol, increase level - level-- - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete object") - return - } - } -} - -func (iter *Iterator) skipString() { - for { - end, escaped := iter.findStringEnd() - if end == -1 { - if !iter.loadMore() { - iter.ReportError("skipString", "incomplete string") - return - } - if escaped { - iter.head = 1 // skip the first char as last char read is \ - } - } else { - iter.head = end - return - } - } -} - -// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go -// Tries to find the end of string -// Support if string contains escaped quote symbols. 
-func (iter *Iterator) findStringEnd() (int, bool) { - escaped := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - if !escaped { - return i + 1, false - } - j := i - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return i + 1, true - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - } - } else if c == '\\' { - escaped = true - } - } - j := iter.tail - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return -1, false // do not end with \ - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - - } - return -1, true // end with \ -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip_strict.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip_strict.go deleted file mode 100644 index 6cf66d0..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_skip_strict.go +++ /dev/null @@ -1,99 +0,0 @@ -//+build !jsoniter_sloppy - -package jsoniter - -import ( - "fmt" - "io" -) - -func (iter *Iterator) skipNumber() { - if !iter.trySkipNumber() { - iter.unreadByte() - if iter.Error != nil && iter.Error != io.EOF { - return - } - iter.ReadFloat64() - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = nil - iter.ReadBigFloat() - } - } -} - -func (iter *Iterator) trySkipNumber() bool { - dotFound := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - case '.': - if dotFound { - iter.ReportError("validateNumber", `more than one dot found in number`) - return true // already failed - } - if i+1 == iter.tail { - return false - } - c = iter.buf[i+1] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - iter.ReportError("validateNumber", `missing digit after dot`) - return true // already failed - } - dotFound = true - default: - switch c { - case ',', ']', '}', ' ', '\t', '\n', '\r': - if iter.head == i { - return false // if - without following digits - } - iter.head = i - return true // must be valid - } - return false // may be invalid - } - } - return false -} - -func (iter *Iterator) skipString() { - if !iter.trySkipString() { - iter.unreadByte() - iter.ReadString() - } -} - -func (iter *Iterator) trySkipString() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - iter.head = i + 1 - return true // valid - } else if c == '\\' { - return false - } else if c < ' ' { - iter.ReportError("trySkipString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return true // already failed - } - } - return false -} - -func (iter *Iterator) skipObject() { - iter.unreadByte() - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - return true - }) -} - -func (iter *Iterator) skipArray() { - iter.unreadByte() - iter.ReadArrayCB(func(iter *Iterator) bool { - iter.Skip() - return true - }) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_str.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_str.go deleted file mode 100644 index adc487e..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/iter_str.go +++ /dev/null @@ -1,215 +0,0 @@ -package jsoniter - -import ( - "fmt" - "unicode/utf16" -) - -// 
ReadString read string from iterator -func (iter *Iterator) ReadString() (ret string) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - ret = string(iter.buf[iter.head:i]) - iter.head = i + 1 - return ret - } else if c == '\\' { - break - } else if c < ' ' { - iter.ReportError("ReadString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return - } - } - return iter.readStringSlowPath() - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return "" - } - iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) - return -} - -func (iter *Iterator) readStringSlowPath() (ret string) { - var str []byte - var c byte - for iter.Error == nil { - c = iter.readByte() - if c == '"' { - return string(str) - } - if c == '\\' { - c = iter.readByte() - str = iter.readEscapedChar(c, str) - } else { - str = append(str, c) - } - } - iter.ReportError("readStringSlowPath", "unexpected end of input") - return -} - -func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { - switch c { - case 'u': - r := iter.readU4() - if utf16.IsSurrogate(r) { - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != '\\' { - iter.unreadByte() - str = appendRune(str, r) - return str - } - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != 'u' { - str = appendRune(str, r) - return iter.readEscapedChar(c, str) - } - r2 := iter.readU4() - if iter.Error != nil { - return nil - } - combined := utf16.DecodeRune(r, r2) - if combined == '\uFFFD' { - str = appendRune(str, r) - str = appendRune(str, r2) - } else { - str = appendRune(str, combined) - } - } else { - str = appendRune(str, r) - } - case '"': - str = append(str, '"') - case '\\': - str = append(str, '\\') - case '/': - str = append(str, '/') - case 'b': - str = append(str, '\b') - case 'f': - str = append(str, '\f') - case 'n': - str = append(str, '\n') - case 'r': - str = append(str, '\r') - case 't': - str = append(str, '\t') - default: - iter.ReportError("readEscapedChar", - `invalid escape char after \`) - return nil - } - return str -} - -// ReadStringAsSlice read string from iterator without copying into string form. -// The []byte can not be kept, as it will change after next iterator call. 
-func (iter *Iterator) ReadStringAsSlice() (ret []byte) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - // for: field name, base64, number - if iter.buf[i] == '"' { - // fast path: reuse the underlying buffer - ret = iter.buf[iter.head:i] - iter.head = i + 1 - return ret - } - } - readLen := iter.tail - iter.head - copied := make([]byte, readLen, readLen*2) - copy(copied, iter.buf[iter.head:iter.tail]) - iter.head = iter.tail - for iter.Error == nil { - c := iter.readByte() - if c == '"' { - return copied - } - copied = append(copied, c) - } - return copied - } - iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) - return -} - -func (iter *Iterator) readU4() (ret rune) { - for i := 0; i < 4; i++ { - c := iter.readByte() - if iter.Error != nil { - return - } - if c >= '0' && c <= '9' { - ret = ret*16 + rune(c-'0') - } else if c >= 'a' && c <= 'f' { - ret = ret*16 + rune(c-'a'+10) - } else if c >= 'A' && c <= 'F' { - ret = ret*16 + rune(c-'A'+10) - } else { - iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) - return - } - } - return ret -} - -const ( - t1 = 0x00 // 0000 0000 - tx = 0x80 // 1000 0000 - t2 = 0xC0 // 1100 0000 - t3 = 0xE0 // 1110 0000 - t4 = 0xF0 // 1111 0000 - t5 = 0xF8 // 1111 1000 - - maskx = 0x3F // 0011 1111 - mask2 = 0x1F // 0001 1111 - mask3 = 0x0F // 0000 1111 - mask4 = 0x07 // 0000 0111 - - rune1Max = 1<<7 - 1 - rune2Max = 1<<11 - 1 - rune3Max = 1<<16 - 1 - - surrogateMin = 0xD800 - surrogateMax = 0xDFFF - - maxRune = '\U0010FFFF' // Maximum valid Unicode code point. - runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" -) - -func appendRune(p []byte, r rune) []byte { - // Negative values are erroneous. Making it unsigned addresses the problem. - switch i := uint32(r); { - case i <= rune1Max: - p = append(p, byte(r)) - return p - case i <= rune2Max: - p = append(p, t2|byte(r>>6)) - p = append(p, tx|byte(r)&maskx) - return p - case i > maxRune, surrogateMin <= i && i <= surrogateMax: - r = runeError - fallthrough - case i <= rune3Max: - p = append(p, t3|byte(r>>12)) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - default: - p = append(p, t4|byte(r>>18)) - p = append(p, tx|byte(r>>12)&maskx) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/jsoniter.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/jsoniter.go deleted file mode 100644 index c2934f9..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/jsoniter.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package jsoniter implements encoding and decoding of JSON as defined in -// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. -// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter -// and variable type declarations (if any). -// jsoniter interfaces gives 100% compatibility with code using standard lib. -// -// "JSON and Go" -// (https://golang.org/doc/articles/json_and_go.html) -// gives a description of how Marshal/Unmarshal operate -// between arbitrary or predefined json objects and bytes, -// and it applies to jsoniter.Marshal/Unmarshal as well. -// -// Besides, jsoniter.Iterator provides a different set of interfaces -// iterating given bytes/string/reader -// and yielding parsed elements one by one. 
-// This set of interfaces reads input as required and gives -// better performance. -package jsoniter diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/pool.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/pool.go deleted file mode 100644 index e2389b5..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/pool.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// IteratorPool a thread safe pool of iterators with same configuration -type IteratorPool interface { - BorrowIterator(data []byte) *Iterator - ReturnIterator(iter *Iterator) -} - -// StreamPool a thread safe pool of streams with same configuration -type StreamPool interface { - BorrowStream(writer io.Writer) *Stream - ReturnStream(stream *Stream) -} - -func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { - stream := cfg.streamPool.Get().(*Stream) - stream.Reset(writer) - return stream -} - -func (cfg *frozenConfig) ReturnStream(stream *Stream) { - stream.out = nil - stream.Error = nil - stream.Attachment = nil - cfg.streamPool.Put(stream) -} - -func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { - iter := cfg.iteratorPool.Get().(*Iterator) - iter.ResetBytes(data) - return iter -} - -func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { - iter.Error = nil - iter.Attachment = nil - cfg.iteratorPool.Put(iter) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect.go deleted file mode 100644 index 4459e20..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect.go +++ /dev/null @@ -1,332 +0,0 @@ -package jsoniter - -import ( - "fmt" - "reflect" - "unsafe" - - "github.com/modern-go/reflect2" -) - -// ValDecoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValDecoder with json.Decoder. -// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). -// -// Reflection on type to create decoders, which is then cached -// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions -// 1. create instance of new value, for example *int will need a int to be allocated -// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New -// 3. assignment to map, both key and value will be reflect.Value -// For a simple struct binding, it will be reflect.Value free and allocation free -type ValDecoder interface { - Decode(ptr unsafe.Pointer, iter *Iterator) -} - -// ValEncoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValEncoder with json.Encoder. -// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
-type ValEncoder interface { - IsEmpty(ptr unsafe.Pointer) bool - Encode(ptr unsafe.Pointer, stream *Stream) -} - -type checkIsEmpty interface { - IsEmpty(ptr unsafe.Pointer) bool -} - -type ctx struct { - *frozenConfig - prefix string - encoders map[reflect2.Type]ValEncoder - decoders map[reflect2.Type]ValDecoder -} - -func (b *ctx) caseSensitive() bool { - if b.frozenConfig == nil { - // default is case-insensitive - return false - } - return b.frozenConfig.caseSensitive -} - -func (b *ctx) append(prefix string) *ctx { - return &ctx{ - frozenConfig: b.frozenConfig, - prefix: b.prefix + " " + prefix, - encoders: b.encoders, - decoders: b.decoders, - } -} - -// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal -func (iter *Iterator) ReadVal(obj interface{}) { - cacheKey := reflect2.RTypeOf(obj) - decoder := iter.cfg.getDecoderFromCache(cacheKey) - if decoder == nil { - typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { - iter.ReportError("ReadVal", "can only unmarshal into pointer") - return - } - decoder = iter.cfg.DecoderOf(typ) - } - ptr := reflect2.PtrOf(obj) - if ptr == nil { - iter.ReportError("ReadVal", "can not read into nil pointer") - return - } - decoder.Decode(ptr, iter) -} - -// WriteVal copy the go interface into underlying JSON, same as json.Marshal -func (stream *Stream) WriteVal(val interface{}) { - if nil == val { - stream.WriteNil() - return - } - cacheKey := reflect2.RTypeOf(val) - encoder := stream.cfg.getEncoderFromCache(cacheKey) - if encoder == nil { - typ := reflect2.TypeOf(val) - encoder = stream.cfg.EncoderOf(typ) - } - encoder.Encode(reflect2.PtrOf(val), stream) -} - -func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { - cacheKey := typ.RType() - decoder := cfg.getDecoderFromCache(cacheKey) - if decoder != nil { - return decoder - } - ctx := &ctx{ - frozenConfig: cfg, - prefix: "", - decoders: map[reflect2.Type]ValDecoder{}, - encoders: map[reflect2.Type]ValEncoder{}, - } - ptrType := typ.(*reflect2.UnsafePtrType) - decoder = decoderOfType(ctx, ptrType.Elem()) - cfg.addDecoderToCache(cacheKey, decoder) - return decoder -} - -func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := getTypeDecoderFromExtension(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfType(ctx, typ) - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) - for _, extension := range ctx.extraExtensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - return decoder -} - -func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := ctx.decoders[typ] - if decoder != nil { - return decoder - } - placeholder := &placeholderDecoder{} - ctx.decoders[typ] = placeholder - decoder = _createDecoderOfType(ctx, typ) - placeholder.decoder = decoder - return decoder -} - -func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := createDecoderOfJsonRawMessage(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfJsonNumber(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfMarshaler(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfAny(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfNative(ctx, typ) - if decoder != nil { - return decoder - } - switch typ.Kind() { - case reflect.Interface: - ifaceType, isIFace := 
typ.(*reflect2.UnsafeIFaceType) - if isIFace { - return &ifaceDecoder{valType: ifaceType} - } - return &efaceDecoder{} - case reflect.Struct: - return decoderOfStruct(ctx, typ) - case reflect.Array: - return decoderOfArray(ctx, typ) - case reflect.Slice: - return decoderOfSlice(ctx, typ) - case reflect.Map: - return decoderOfMap(ctx, typ) - case reflect.Ptr: - return decoderOfOptional(ctx, typ) - default: - return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} - } -} - -func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { - cacheKey := typ.RType() - encoder := cfg.getEncoderFromCache(cacheKey) - if encoder != nil { - return encoder - } - ctx := &ctx{ - frozenConfig: cfg, - prefix: "", - decoders: map[reflect2.Type]ValDecoder{}, - encoders: map[reflect2.Type]ValEncoder{}, - } - encoder = encoderOfType(ctx, typ) - if typ.LikePtr() { - encoder = &onePtrEncoder{encoder} - } - cfg.addEncoderToCache(cacheKey, encoder) - return encoder -} - -type onePtrEncoder struct { - encoder ValEncoder -} - -func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) -} - -func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) -} - -func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := getTypeEncoderFromExtension(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfType(ctx, typ) - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) - for _, extension := range ctx.extraExtensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - return encoder -} - -func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := ctx.encoders[typ] - if encoder != nil { - return encoder - } - placeholder := &placeholderEncoder{} - ctx.encoders[typ] = placeholder - encoder = _createEncoderOfType(ctx, typ) - placeholder.encoder = encoder - return encoder -} -func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := createEncoderOfJsonRawMessage(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfJsonNumber(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfMarshaler(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfAny(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfNative(ctx, typ) - if encoder != nil { - return encoder - } - kind := typ.Kind() - switch kind { - case reflect.Interface: - return &dynamicEncoder{typ} - case reflect.Struct: - return encoderOfStruct(ctx, typ) - case reflect.Array: - return encoderOfArray(ctx, typ) - case reflect.Slice: - return encoderOfSlice(ctx, typ) - case reflect.Map: - return encoderOfMap(ctx, typ) - case reflect.Ptr: - return encoderOfOptional(ctx, typ) - default: - return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} - } -} - -type lazyErrorDecoder struct { - err error -} - -func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() != NilValue { - if iter.Error == nil { - iter.Error = decoder.err - } - } else { - iter.Skip() - } -} - -type lazyErrorEncoder struct { - err error -} - -func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if ptr == nil { - stream.WriteNil() - } else 
if stream.Error == nil { - stream.Error = encoder.err - } -} - -func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type placeholderDecoder struct { - decoder ValDecoder -} - -func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.decoder.Decode(ptr, iter) -} - -type placeholderEncoder struct { - encoder ValEncoder -} - -func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(ptr, stream) -} - -func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(ptr) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_array.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_array.go deleted file mode 100644 index 13a0b7b..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_array.go +++ /dev/null @@ -1,104 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "unsafe" -) - -func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { - arrayType := typ.(*reflect2.UnsafeArrayType) - decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) - return &arrayDecoder{arrayType, decoder} -} - -func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { - arrayType := typ.(*reflect2.UnsafeArrayType) - if arrayType.Len() == 0 { - return emptyArrayEncoder{} - } - encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) - return &arrayEncoder{arrayType, encoder} -} - -type emptyArrayEncoder struct{} - -func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyArray() -} - -func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return true -} - -type arrayEncoder struct { - arrayType *reflect2.UnsafeArrayType - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) - encoder.elemEncoder.Encode(elemPtr, stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType *reflect2.UnsafeArrayType - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - arrayType := decoder.arrayType - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return - } - if c != '[' { - iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) - return - } - c = iter.nextToken() - if c == ']' { - return - } - iter.unreadByte() - elemPtr := arrayType.UnsafeGetIndex(ptr, 0) - decoder.elemDecoder.Decode(elemPtr, iter) - length := 1 - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - if length >= arrayType.Len() { - iter.Skip() - continue - } - idx := length - length += 1 - elemPtr = arrayType.UnsafeGetIndex(ptr, idx) - decoder.elemDecoder.Decode(elemPtr, iter) 
- } - if c != ']' { - iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) - return - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_dynamic.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_dynamic.go deleted file mode 100644 index 8b6bc8b..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_dynamic.go +++ /dev/null @@ -1,70 +0,0 @@ -package jsoniter - -import ( - "github.com/modern-go/reflect2" - "reflect" - "unsafe" -) - -type dynamicEncoder struct { - valType reflect2.Type -} - -func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - stream.WriteVal(obj) -} - -func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.valType.UnsafeIndirect(ptr) == nil -} - -type efaceDecoder struct { -} - -func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - pObj := (*interface{})(ptr) - obj := *pObj - if obj == nil { - *pObj = iter.Read() - return - } - typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { - *pObj = iter.Read() - return - } - ptrType := typ.(*reflect2.UnsafePtrType) - ptrElemType := ptrType.Elem() - if iter.WhatIsNext() == NilValue { - if ptrElemType.Kind() != reflect.Ptr { - iter.skipFourBytes('n', 'u', 'l', 'l') - *pObj = nil - return - } - } - if reflect2.IsNil(obj) { - obj := ptrElemType.New() - iter.ReadVal(obj) - *pObj = obj - return - } - iter.ReadVal(obj) -} - -type ifaceDecoder struct { - valType *reflect2.UnsafeIFaceType -} - -func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) - return - } - obj := decoder.valType.UnsafeIndirect(ptr) - if reflect2.IsNil(obj) { - iter.ReportError("decode non empty interface", "can not unmarshal into nil") - return - } - iter.ReadVal(obj) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_extension.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_extension.go deleted file mode 100644 index 05e8fbf..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_extension.go +++ /dev/null @@ -1,483 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "reflect" - "sort" - "strings" - "unicode" - "unsafe" -) - -var typeDecoders = map[string]ValDecoder{} -var fieldDecoders = map[string]ValDecoder{} -var typeEncoders = map[string]ValEncoder{} -var fieldEncoders = map[string]ValEncoder{} -var extensions = []Extension{} - -// StructDescriptor describe how should we encode/decode the struct -type StructDescriptor struct { - Type reflect2.Type - Fields []*Binding -} - -// GetField get one field from the descriptor by its name. -// Can not use map here to keep field orders. -func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { - for _, binding := range structDescriptor.Fields { - if binding.Field.Name() == fieldName { - return binding - } - } - return nil -} - -// Binding describe how should we encode/decode the struct field -type Binding struct { - levels []int - Field reflect2.StructField - FromNames []string - ToNames []string - Encoder ValEncoder - Decoder ValDecoder -} - -// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. -// Can also rename fields by UpdateStructDescriptor. 
-type Extension interface { - UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateMapKeyDecoder(typ reflect2.Type) ValDecoder - CreateMapKeyEncoder(typ reflect2.Type) ValEncoder - CreateDecoder(typ reflect2.Type) ValDecoder - CreateEncoder(typ reflect2.Type) ValEncoder - DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder -} - -// DummyExtension embed this type get dummy implementation for all methods of Extension -type DummyExtension struct { -} - -// UpdateStructDescriptor No-op -func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateMapKeyDecoder No-op -func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type EncoderExtension map[reflect2.Type]ValEncoder - -// UpdateStructDescriptor No-op -func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateDecoder No-op -func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateEncoder get encoder from map -func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return extension[typ] -} - -// CreateMapKeyDecoder No-op -func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type DecoderExtension map[reflect2.Type]ValDecoder - -// UpdateStructDescriptor No-op -func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateMapKeyDecoder No-op -func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// CreateDecoder get decoder from map -func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return extension[typ] -} - -// CreateEncoder No-op -func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder 
-} - -type funcDecoder struct { - fun DecoderFunc -} - -func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.fun(ptr, iter) -} - -type funcEncoder struct { - fun EncoderFunc - isEmptyFunc func(ptr unsafe.Pointer) bool -} - -func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.fun(ptr, stream) -} - -func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if encoder.isEmptyFunc == nil { - return false - } - return encoder.isEmptyFunc(ptr) -} - -// DecoderFunc the function form of TypeDecoder -type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) - -// EncoderFunc the function form of TypeEncoder -type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) - -// RegisterTypeDecoderFunc register TypeDecoder for a type with function -func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { - typeDecoders[typ] = &funcDecoder{fun} -} - -// RegisterTypeDecoder register TypeDecoder for a typ -func RegisterTypeDecoder(typ string, decoder ValDecoder) { - typeDecoders[typ] = decoder -} - -// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function -func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { - RegisterFieldDecoder(typ, field, &funcDecoder{fun}) -} - -// RegisterFieldDecoder register TypeDecoder for a struct field -func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { - fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder -} - -// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function -func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} -} - -// RegisterTypeEncoder register TypeEncoder for a type -func RegisterTypeEncoder(typ string, encoder ValEncoder) { - typeEncoders[typ] = encoder -} - -// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function -func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) -} - -// RegisterFieldEncoder register TypeEncoder for a struct field -func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { - fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder -} - -// RegisterExtension register extension -func RegisterExtension(extension Extension) { - extensions = append(extensions, extension) -} - -func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(ctx, typ) - if decoder != nil { - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) - for _, extension := range ctx.extraExtensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - } - return decoder -} -func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { - for _, extension := range extensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - decoder := ctx.decoderExtension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - for _, extension := range ctx.extraExtensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - typeName := typ.String() - decoder = typeDecoders[typeName] - if decoder != nil { - return decoder - } - if typ.Kind() == reflect.Ptr { - ptrType := 
typ.(*reflect2.UnsafePtrType) - decoder := typeDecoders[ptrType.Elem().String()] - if decoder != nil { - return &OptionalDecoder{ptrType.Elem(), decoder} - } - } - return nil -} - -func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(ctx, typ) - if encoder != nil { - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) - for _, extension := range ctx.extraExtensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - } - return encoder -} - -func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { - for _, extension := range extensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - encoder := ctx.encoderExtension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - for _, extension := range ctx.extraExtensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - typeName := typ.String() - encoder = typeEncoders[typeName] - if encoder != nil { - return encoder - } - if typ.Kind() == reflect.Ptr { - typePtr := typ.(*reflect2.UnsafePtrType) - encoder := typeEncoders[typePtr.Elem().String()] - if encoder != nil { - return &OptionalEncoder{encoder} - } - } - return nil -} - -func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { - structType := typ.(*reflect2.UnsafeStructType) - embeddedBindings := []*Binding{} - bindings := []*Binding{} - for i := 0; i < structType.NumField(); i++ { - field := structType.Field(i) - tag, hastag := field.Tag().Lookup(ctx.getTagKey()) - if ctx.onlyTaggedField && !hastag && !field.Anonymous() { - continue - } - tagParts := strings.Split(tag, ",") - if tag == "-" { - continue - } - if field.Anonymous() && (tag == "" || tagParts[0] == "") { - if field.Type().Kind() == reflect.Struct { - structDescriptor := describeStruct(ctx, field.Type()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } else if field.Type().Kind() == reflect.Ptr { - ptrType := field.Type().(*reflect2.UnsafePtrType) - if ptrType.Elem().Kind() == reflect.Struct { - structDescriptor := describeStruct(ctx, ptrType.Elem()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) 
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &dereferenceEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} - binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } - } - } - fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) - decoder := fieldDecoders[fieldCacheKey] - if decoder == nil { - decoder = decoderOfType(ctx.append(field.Name()), field.Type()) - } - encoder := fieldEncoders[fieldCacheKey] - if encoder == nil { - encoder = encoderOfType(ctx.append(field.Name()), field.Type()) - } - binding := &Binding{ - Field: field, - FromNames: fieldNames, - ToNames: fieldNames, - Decoder: decoder, - Encoder: encoder, - } - binding.levels = []int{i} - bindings = append(bindings, binding) - } - return createStructDescriptor(ctx, typ, bindings, embeddedBindings) -} -func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - structDescriptor := &StructDescriptor{ - Type: typ, - Fields: bindings, - } - for _, extension := range extensions { - extension.UpdateStructDescriptor(structDescriptor) - } - ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) - ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) - for _, extension := range ctx.extraExtensions { - extension.UpdateStructDescriptor(structDescriptor) - } - processTags(structDescriptor, ctx.frozenConfig) - // merge normal & embedded bindings & sort with original order - allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) - sort.Sort(allBindings) - structDescriptor.Fields = allBindings - return structDescriptor -} - -type sortableBindings []*Binding - -func (bindings sortableBindings) Len() int { - return len(bindings) -} - -func (bindings sortableBindings) Less(i, j int) bool { - left := bindings[i].levels - right := bindings[j].levels - k := 0 - for { - if left[k] < right[k] { - return true - } else if left[k] > right[k] { - return false - } - k++ - } -} - -func (bindings sortableBindings) Swap(i, j int) { - bindings[i], bindings[j] = bindings[j], bindings[i] -} - -func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { - for _, binding := range structDescriptor.Fields { - shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") - for _, tagPart := range tagParts[1:] { - if tagPart == "omitempty" { - shouldOmitEmpty = true - } else if tagPart == "string" { - if binding.Field.Type().Kind() == reflect.String { - binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} - binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} - } else { - binding.Decoder = &stringModeNumberDecoder{binding.Decoder} - binding.Encoder = &stringModeNumberEncoder{binding.Encoder} - } - } - } - binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} - binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} - } -} - -func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { - // ignore? - if wholeTag == "-" { - return []string{} - } - // rename? 
- var fieldNames []string - if tagProvidedFieldName == "" { - fieldNames = []string{originalFieldName} - } else { - fieldNames = []string{tagProvidedFieldName} - } - // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) - if isNotExported { - fieldNames = []string{} - } - return fieldNames -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_json_number.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_json_number.go deleted file mode 100644 index 98d45c1..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_json_number.go +++ /dev/null @@ -1,112 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "github.com/modern-go/reflect2" - "strconv" - "unsafe" -) - -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -func CastJsonNumber(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case json.Number: - return string(typedVal), true - case Number: - return string(typedVal), true - } - return "", false -} - -var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() -var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() - -func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - return nil -} - -func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - return nil -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*json.Number)(ptr)) = json.Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*json.Number)(ptr)) = "" - default: - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*json.Number)(ptr)) - if len(number) == 0 { - stream.writeByte('0') - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct { -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*Number)(ptr)) = Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*Number)(ptr)) = "" - default: - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*Number)(ptr)) - if len(number) == 0 { - stream.writeByte('0') - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_json_raw_message.go 
b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_json_raw_message.go deleted file mode 100644 index f261993..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_json_raw_message.go +++ /dev/null @@ -1,60 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "github.com/modern-go/reflect2" - "unsafe" -) - -var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() -var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() - -func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - return nil -} - -func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - return nil -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_map.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_map.go deleted file mode 100644 index 547b442..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_map.go +++ /dev/null @@ -1,338 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "sort" - "unsafe" -) - -func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { - mapType := typ.(*reflect2.UnsafeMapType) - keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) - elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) - return &mapDecoder{ - mapType: mapType, - keyType: mapType.Key(), - elemType: mapType.Elem(), - keyDecoder: keyDecoder, - elemDecoder: elemDecoder, - } -} - -func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { - mapType := typ.(*reflect2.UnsafeMapType) - if ctx.sortMapKeys { - return &sortKeysMapEncoder{ - mapType: mapType, - keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), - elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), - } - } - return &mapEncoder{ - mapType: mapType, - keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), - elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), - } -} - -func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) - if decoder != nil { - return decoder - } - for _, extension := range ctx.extraExtensions { - decoder := extension.CreateMapKeyDecoder(typ) - if decoder != nil { - return decoder - } - } - 
switch typ.Kind() { - case reflect.String: - return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) - case reflect.Bool, - reflect.Uint8, reflect.Int8, - reflect.Uint16, reflect.Int16, - reflect.Uint32, reflect.Int32, - reflect.Uint64, reflect.Int64, - reflect.Uint, reflect.Int, - reflect.Float32, reflect.Float64, - reflect.Uintptr: - typ = reflect2.DefaultTypeOfKind(typ.Kind()) - return &numericMapKeyDecoder{decoderOfType(ctx, typ)} - default: - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(unmarshalerType) { - return &unmarshalerDecoder{ - valType: typ, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(textUnmarshalerType) { - return &textUnmarshalerDecoder{ - valType: typ, - } - } - return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} - } -} - -func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) - if encoder != nil { - return encoder - } - for _, extension := range ctx.extraExtensions { - encoder := extension.CreateMapKeyEncoder(typ) - if encoder != nil { - return encoder - } - } - switch typ.Kind() { - case reflect.String: - return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) - case reflect.Bool, - reflect.Uint8, reflect.Int8, - reflect.Uint16, reflect.Int16, - reflect.Uint32, reflect.Int32, - reflect.Uint64, reflect.Int64, - reflect.Uint, reflect.Int, - reflect.Float32, reflect.Float64, - reflect.Uintptr: - typ = reflect2.DefaultTypeOfKind(typ.Kind()) - return &numericMapKeyEncoder{encoderOfType(ctx, typ)} - default: - if typ == textMarshalerType { - return &directTextMarshalerEncoder{ - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Implements(textMarshalerType) { - return &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Kind() == reflect.Interface { - return &dynamicMapKeyEncoder{ctx, typ} - } - return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} - } -} - -type mapDecoder struct { - mapType *reflect2.UnsafeMapType - keyType reflect2.Type - elemType reflect2.Type - keyDecoder ValDecoder - elemDecoder ValDecoder -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - mapType := decoder.mapType - c := iter.nextToken() - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - *(*unsafe.Pointer)(ptr) = nil - mapType.UnsafeSet(ptr, mapType.UnsafeNew()) - return - } - if mapType.UnsafeIsNil(ptr) { - mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) - } - if c != '{' { - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return - } - c = iter.nextToken() - if c == '}' { - return - } - if c != '"' { - iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) - return - } - iter.unreadByte() - key := decoder.keyType.UnsafeNew() - decoder.keyDecoder.Decode(key, iter) - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return - } - elem := decoder.elemType.UnsafeNew() - decoder.elemDecoder.Decode(elem, iter) - decoder.mapType.UnsafeSetIndex(ptr, key, elem) - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - key := decoder.keyType.UnsafeNew() - 
decoder.keyDecoder.Decode(key, iter) - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return - } - elem := decoder.elemType.UnsafeNew() - decoder.elemDecoder.Decode(elem, iter) - decoder.mapType.UnsafeSetIndex(ptr, key, elem) - } - if c != '}' { - iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) - } -} - -type numericMapKeyDecoder struct { - decoder ValDecoder -} - -func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) - return - } - decoder.decoder.Decode(ptr, iter) - c = iter.nextToken() - if c != '"' { - iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) - return - } -} - -type numericMapKeyEncoder struct { - encoder ValEncoder -} - -func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.encoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type dynamicMapKeyEncoder struct { - ctx *ctx - valType reflect2.Type -} - -func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) -} - -func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { - obj := encoder.valType.UnsafeIndirect(ptr) - return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) -} - -type mapEncoder struct { - mapType *reflect2.UnsafeMapType - keyEncoder ValEncoder - elemEncoder ValEncoder -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteObjectStart() - iter := encoder.mapType.UnsafeIterate(ptr) - for i := 0; iter.HasNext(); i++ { - if i != 0 { - stream.WriteMore() - } - key, elem := iter.UnsafeNext() - encoder.keyEncoder.Encode(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - encoder.elemEncoder.Encode(elem, stream) - } - stream.WriteObjectEnd() -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - iter := encoder.mapType.UnsafeIterate(ptr) - return !iter.HasNext() -} - -type sortKeysMapEncoder struct { - mapType *reflect2.UnsafeMapType - keyEncoder ValEncoder - elemEncoder ValEncoder -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *(*unsafe.Pointer)(ptr) == nil { - stream.WriteNil() - return - } - stream.WriteObjectStart() - mapIter := encoder.mapType.UnsafeIterate(ptr) - subStream := stream.cfg.BorrowStream(nil) - subIter := stream.cfg.BorrowIterator(nil) - keyValues := encodedKeyValues{} - for mapIter.HasNext() { - subStream.buf = make([]byte, 0, 64) - key, elem := mapIter.UnsafeNext() - encoder.keyEncoder.Encode(key, subStream) - if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { - stream.Error = subStream.Error - } - encodedKey := subStream.Buffer() - subIter.ResetBytes(encodedKey) - decodedKey := subIter.ReadString() - if stream.indention > 0 { - subStream.writeTwoBytes(byte(':'), byte(' ')) - } else { - subStream.writeByte(':') - } - encoder.elemEncoder.Encode(elem, subStream) - keyValues = append(keyValues, encodedKV{ - key: decodedKey, - keyValue: subStream.Buffer(), - }) - } - 
sort.Sort(keyValues) - for i, keyValue := range keyValues { - if i != 0 { - stream.WriteMore() - } - stream.Write(keyValue.keyValue) - } - stream.WriteObjectEnd() - stream.cfg.ReturnStream(subStream) - stream.cfg.ReturnIterator(subIter) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - iter := encoder.mapType.UnsafeIterate(ptr) - return !iter.HasNext() -} - -type encodedKeyValues []encodedKV - -type encodedKV struct { - key string - keyValue []byte -} - -func (sv encodedKeyValues) Len() int { return len(sv) } -func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_marshaler.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_marshaler.go deleted file mode 100644 index fea5071..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ /dev/null @@ -1,217 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "github.com/modern-go/reflect2" - "unsafe" -) - -var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() -var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() -var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() -var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() - -func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ptrType}, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ptrType}, - } - } - return nil -} - -func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == marshalerType { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &directMarshalerEncoder{ - checkIsEmpty: checkIsEmpty, - } - return encoder - } - if typ.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &marshalerEncoder{ - valType: typ, - checkIsEmpty: checkIsEmpty, - } - return encoder - } - ptrType := reflect2.PtrTo(typ) - if ctx.prefix != "" && ptrType.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, ptrType) - var encoder ValEncoder = &marshalerEncoder{ - valType: ptrType, - checkIsEmpty: checkIsEmpty, - } - return &referenceEncoder{encoder} - } - if typ == textMarshalerType { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &directTextMarshalerEncoder{ - checkIsEmpty: checkIsEmpty, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - return encoder - } - if typ.Implements(textMarshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - checkIsEmpty: checkIsEmpty, - } - return encoder - } - // if prefix is empty, the type is the root type - if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, ptrType) - var encoder ValEncoder = &textMarshalerEncoder{ - valType: ptrType, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - checkIsEmpty: checkIsEmpty, - } - return &referenceEncoder{encoder} - } - return nil -} - -type marshalerEncoder struct { - checkIsEmpty checkIsEmpty - valType reflect2.Type -} - -func (encoder *marshalerEncoder) 
Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - if encoder.valType.IsNullable() && reflect2.IsNil(obj) { - stream.WriteNil() - return - } - bytes, err := json.Marshal(obj) - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type directMarshalerEncoder struct { - checkIsEmpty checkIsEmpty -} - -func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - marshaler := *(*json.Marshaler)(ptr) - if marshaler == nil { - stream.WriteNil() - return - } - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} - -func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - valType reflect2.Type - stringEncoder ValEncoder - checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - if encoder.valType.IsNullable() && reflect2.IsNil(obj) { - stream.WriteNil() - return - } - marshaler := (obj).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - str := string(bytes) - encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) - } -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type directTextMarshalerEncoder struct { - stringEncoder ValEncoder - checkIsEmpty checkIsEmpty -} - -func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - marshaler := *(*encoding.TextMarshaler)(ptr) - if marshaler == nil { - stream.WriteNil() - return - } - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - str := string(bytes) - encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) - } -} - -func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - valType reflect2.Type -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valType := decoder.valType - obj := valType.UnsafeIndirect(ptr) - unmarshaler := obj.(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - valType reflect2.Type -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valType := decoder.valType - obj := valType.UnsafeIndirect(ptr) - if reflect2.IsNil(obj) { - ptrType := valType.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - elem := elemType.UnsafeNew() - ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) - obj = valType.UnsafeIndirect(ptr) - } - unmarshaler := (obj).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_native.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_native.go deleted file mode 100644 index 9042eb0..0000000 --- 
a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_native.go +++ /dev/null @@ -1,451 +0,0 @@ -package jsoniter - -import ( - "encoding/base64" - "reflect" - "strconv" - "unsafe" - - "github.com/modern-go/reflect2" -) - -const ptrSize = 32 << uintptr(^uintptr(0)>>63) - -func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { - sliceDecoder := decoderOfSlice(ctx, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &int32Codec{} - } - return &int64Codec{} - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) - } - if ptrSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) - } - return &boolCodec{} - } - return nil -} - -func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { - sliceDecoder := decoderOfSlice(ctx, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - typeName := typ.String() - switch typ.Kind() { - case reflect.String: - if typeName != "string" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return decoderOfType(ctx, 
reflect2.TypeOfPtr((*int)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &int32Codec{} - } - return &int64Codec{} - case reflect.Int8: - if typeName != "int8" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint8: - if typeName != "uint8" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) - } - if ptrSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint64: - if typeName != "uint64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) - } - return &boolCodec{} - } - return nil -} - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int8)(ptr)) = iter.ReadInt8() - } -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int16)(ptr)) = iter.ReadInt16() - } -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int32)(ptr)) = iter.ReadInt32() - } -} - -func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - 
stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int64)(ptr)) = iter.ReadInt64() - } -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint8)(ptr)) = iter.ReadUint8() - } -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint16)(ptr)) = iter.ReadUint16() - } -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint32)(ptr)) = iter.ReadUint32() - } -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint64)(ptr)) = iter.ReadUint64() - } -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float32)(ptr)) = iter.ReadFloat32() - } -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float64)(ptr)) = iter.ReadFloat64() - } -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*bool)(ptr)) = iter.ReadBool() - } -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type base64Codec struct { - sliceType *reflect2.UnsafeSliceType - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - codec.sliceType.UnsafeSetNil(ptr) - return - } - switch iter.WhatIsNext() { - case StringValue: - src := 
iter.ReadString() - dst, err := base64.StdEncoding.DecodeString(src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { - stream.WriteNil() - return - } - encoding := base64.StdEncoding - stream.writeByte('"') - size := encoding.EncodedLen(len(src)) - buf := make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_optional.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_optional.go deleted file mode 100644 index 43ec71d..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_optional.go +++ /dev/null @@ -1,133 +0,0 @@ -package jsoniter - -import ( - "github.com/modern-go/reflect2" - "reflect" - "unsafe" -) - -func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { - ptrType := typ.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - decoder := decoderOfType(ctx, elemType) - if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { - return &dereferenceDecoder{elemType, decoder} - } - return &OptionalDecoder{elemType, decoder} -} - -func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { - ptrType := typ.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - elemEncoder := encoderOfType(ctx, elemType) - encoder := &OptionalEncoder{elemEncoder} - return encoder -} - -type OptionalDecoder struct { - ValueType reflect2.Type - ValueDecoder ValDecoder -} - -func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*unsafe.Pointer)(ptr)) = nil - } else { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - newPtr := decoder.ValueType.UnsafeNew() - decoder.ValueDecoder.Decode(newPtr, iter) - *((*unsafe.Pointer)(ptr)) = newPtr - } else { - //reuse existing instance - decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } - } -} - -type dereferenceDecoder struct { - // only to deference a pointer - valueType reflect2.Type - valueDecoder ValDecoder -} - -func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - newPtr := decoder.valueType.UnsafeNew() - decoder.valueDecoder.Decode(newPtr, iter) - *((*unsafe.Pointer)(ptr)) = newPtr - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } -} - -type OptionalEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*unsafe.Pointer)(ptr)) == nil -} - -type dereferenceEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - 
encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - dePtr := *((*unsafe.Pointer)(ptr)) - if dePtr == nil { - return true - } - return encoder.ValueEncoder.IsEmpty(dePtr) -} - -func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { - deReferenced := *((*unsafe.Pointer)(ptr)) - if deReferenced == nil { - return true - } - isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) - if !converted { - return false - } - fieldPtr := unsafe.Pointer(deReferenced) - return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) -} - -type referenceEncoder struct { - encoder ValEncoder -} - -func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) -} - -func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) -} - -type referenceDecoder struct { - decoder ValDecoder -} - -func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_slice.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_slice.go deleted file mode 100644 index 9441d79..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_slice.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "unsafe" -) - -func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { - sliceType := typ.(*reflect2.UnsafeSliceType) - decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) - return &sliceDecoder{sliceType, decoder} -} - -func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { - sliceType := typ.(*reflect2.UnsafeSliceType) - encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) - return &sliceEncoder{sliceType, encoder} -} - -type sliceEncoder struct { - sliceType *reflect2.UnsafeSliceType - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if encoder.sliceType.UnsafeIsNil(ptr) { - stream.WriteNil() - return - } - length := encoder.sliceType.UnsafeLengthOf(ptr) - if length == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) - for i := 1; i < length; i++ { - stream.WriteMore() - elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) - encoder.elemEncoder.Encode(elemPtr, stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.sliceType.UnsafeLengthOf(ptr) == 0 -} - -type sliceDecoder struct { - sliceType *reflect2.UnsafeSliceType - elemDecoder ValDecoder -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - sliceType := decoder.sliceType - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - sliceType.UnsafeSetNil(ptr) - return - } - if c != '[' { - iter.ReportError("decode slice", 
"expect [ or n, but found "+string([]byte{c})) - return - } - c = iter.nextToken() - if c == ']' { - sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) - return - } - iter.unreadByte() - sliceType.UnsafeGrow(ptr, 1) - elemPtr := sliceType.UnsafeGetIndex(ptr, 0) - decoder.elemDecoder.Decode(elemPtr, iter) - length := 1 - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - idx := length - length += 1 - sliceType.UnsafeGrow(ptr, length) - elemPtr = sliceType.UnsafeGetIndex(ptr, idx) - decoder.elemDecoder.Decode(elemPtr, iter) - } - if c != ']' { - iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) - return - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_struct_decoder.go deleted file mode 100644 index 355d2d1..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ /dev/null @@ -1,1048 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "strings" - "unsafe" - - "github.com/modern-go/reflect2" -) - -func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { - bindings := map[string]*Binding{} - structDescriptor := describeStruct(ctx, typ) - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[k] = binding.Decoder.(*structFieldDecoder) - } - - if !ctx.caseSensitive() { - for k, binding := range bindings { - if _, found := fields[strings.ToLower(k)]; !found { - fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) - } - } - } - - return createStructDecoder(ctx, typ, fields) -} - -func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { - if ctx.disallowUnknownFields { - return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} - } - knownHash := map[int64]struct{}{ - 0: {}, - } - - switch len(fields) { - case 0: - return &skipObjectDecoder{typ} - case 1: - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} - } - case 2: - var fieldHash1 int64 - var fieldHash2 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldHash1 == 0 { - fieldHash1 = fieldHash - fieldDecoder1 = fieldDecoder - } else { - fieldHash2 = fieldHash - fieldDecoder2 = fieldDecoder - } - } - return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} - case 3: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - for fieldName, fieldDecoder := range 
fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } - } - return &threeFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3} - case 4: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } - } - return &fourFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4} - case 5: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } - } - return &fiveFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5} - case 6: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - 
fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } - } - return &sixFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6} - case 7: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } - } - return &sevenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7} - case 8: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } - } - return &eightFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, 
fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8} - case 9: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldName9 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } - } - return &nineFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8, - fieldName9, fieldDecoder9} - case 10: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldName9 int64 - var fieldName10 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - var fieldDecoder10 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else 
if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else if fieldName9 == 0 { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } else { - fieldName10 = fieldHash - fieldDecoder10 = fieldDecoder - } - } - return &tenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8, - fieldName9, fieldDecoder9, - fieldName10, fieldDecoder10} - } - return &generalStructDecoder{typ, fields, false} -} - -type generalStructDecoder struct { - typ reflect2.Type - fields map[string]*structFieldDecoder - disallowUnknownFields bool -} - -func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - var c byte - for c = ','; c == ','; c = iter.nextToken() { - decoder.decodeOneField(ptr, iter) - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - if c != '}' { - iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) - } -} - -func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { - var field string - var fieldDecoder *structFieldDecoder - if iter.cfg.objectFieldMustBeSimpleString { - fieldBytes := iter.ReadStringAsSlice() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder = decoder.fields[field] - if fieldDecoder == nil && !iter.cfg.caseSensitive { - fieldDecoder = decoder.fields[strings.ToLower(field)] - } - } else { - field = iter.ReadString() - fieldDecoder = decoder.fields[field] - if fieldDecoder == nil && !iter.cfg.caseSensitive { - fieldDecoder = decoder.fields[strings.ToLower(field)] - } - } - if fieldDecoder == nil { - msg := "found unknown field: " + field - if decoder.disallowUnknownFields { - iter.ReportError("ReadObject", msg) - } - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - iter.Skip() - return - } - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - fieldDecoder.Decode(ptr, iter) -} - -type skipObjectDecoder struct { - typ reflect2.Type -} - -func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valueType := iter.WhatIsNext() - if valueType != ObjectValue && valueType != NilValue { - iter.ReportError("skipObjectDecoder", "expect object or null") - return - } - iter.Skip() -} - -type oneFieldStructDecoder struct { - typ reflect2.Type - fieldHash int64 - fieldDecoder *structFieldDecoder -} - -func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - if iter.readFieldHash() == decoder.fieldHash { - decoder.fieldDecoder.Decode(ptr, iter) - } else { - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type twoFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder -} - -func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case 
decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type threeFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder -} - -func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type fourFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder -} - -func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type fiveFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder -} - -func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type sixFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder -} - -func (decoder *sixFieldsStructDecoder) Decode(ptr 
unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type sevenFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder -} - -func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type eightFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder -} - -func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type nineFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 
*structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder - fieldHash9 int64 - fieldDecoder9 *structFieldDecoder -} - -func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type tenFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder - fieldHash9 int64 - fieldDecoder9 *structFieldDecoder - fieldHash10 int64 - fieldDecoder10 *structFieldDecoder -} - -func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - case decoder.fieldHash10: - decoder.fieldDecoder10.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } -} - -type structFieldDecoder struct { - field reflect2.StructField - fieldDecoder ValDecoder -} - -func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := decoder.field.UnsafeGet(ptr) - decoder.fieldDecoder.Decode(fieldPtr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) - } -} - -type 
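Aside (not part of the patch): the two- through ten-field decoders removed above all follow the same pattern — read the next object key, hash it, and switch on the hash to pick a per-field decoder, skipping unknown keys. Below is a minimal, self-contained sketch of that dispatch-by-hash idea using only the standard library; the FNV hash, the person type, and the helper names are assumptions for illustration, not jsoniter's actual generated decoders.

	package main

	import (
		"encoding/json"
		"fmt"
		"hash/fnv"
	)

	// fieldHash stands in for jsoniter's field-name hashing (illustrative only).
	func fieldHash(name string) uint64 {
		h := fnv.New64a()
		h.Write([]byte(name))
		return h.Sum64()
	}

	type person struct {
		Name string
		Age  int
	}

	// decodePerson dispatches each key by hash and skips unknown fields,
	// roughly mirroring the switch/default pattern in the decoders above.
	func decodePerson(data []byte) (person, error) {
		var raw map[string]json.RawMessage
		if err := json.Unmarshal(data, &raw); err != nil {
			return person{}, err
		}
		var p person
		nameHash, ageHash := fieldHash("name"), fieldHash("age")
		for k, v := range raw {
			switch fieldHash(k) {
			case nameHash:
				if err := json.Unmarshal(v, &p.Name); err != nil {
					return person{}, fmt.Errorf("name: %w", err)
				}
			case ageHash:
				if err := json.Unmarshal(v, &p.Age); err != nil {
					return person{}, fmt.Errorf("age: %w", err)
				}
			default:
				// unknown field: skip, as iter.Skip() does above
			}
		}
		return p, nil
	}

	func main() {
		p, err := decodePerson([]byte(`{"name":"ada","age":36}`))
		fmt.Println(p, err)
	}

Hashing the key once and switching on an integer avoids repeated string comparisons per field, which is the point of the unrolled N-field variants being deleted here.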
stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_struct_encoder.go deleted file mode 100644 index d0759cf..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/reflect_struct_encoder.go +++ /dev/null @@ -1,210 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "unsafe" -) - -func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { - type bindingTo struct { - binding *Binding - toName string - ignored bool - } - orderedBindings := []*bindingTo{} - structDescriptor := describeStruct(ctx, typ) - for _, binding := range structDescriptor.Fields { - for _, toName := range binding.ToNames { - new := &bindingTo{ - binding: binding, - toName: toName, - } - for _, old := range orderedBindings { - if old.toName != toName { - continue - } - old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) - } - orderedBindings = append(orderedBindings, new) - } - } - if len(orderedBindings) == 0 { - return &emptyStructEncoder{} - } - finalOrderedFields := []structFieldTo{} - for _, bindingTo := range orderedBindings { - if !bindingTo.ignored { - finalOrderedFields = append(finalOrderedFields, structFieldTo{ - encoder: bindingTo.binding.Encoder.(*structFieldEncoder), - toName: bindingTo.toName, - }) - } - } - return &structEncoder{typ, finalOrderedFields} -} - -func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { - encoder := createEncoderOfNative(ctx, typ) - if encoder != nil { - return encoder - } - kind := typ.Kind() - switch kind { - case reflect.Interface: - return &dynamicEncoder{typ} - case reflect.Struct: - return &structEncoder{typ: typ} - case reflect.Array: - return &arrayEncoder{} - case reflect.Slice: - return &sliceEncoder{} - case reflect.Map: - return encoderOfMap(ctx, typ) - case reflect.Ptr: - return &OptionalEncoder{} - default: - return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} - } -} - -func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { - newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" - oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" - if newTagged { - if oldTagged { - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) > len(old.levels) { - return false, true - } else { - return true, true - } - } else { - return true, false - } - } else { - if oldTagged { - return true, false - } - if len(old.levels) > len(new.levels) { - return true, false - } else if len(new.levels) 
> len(old.levels) { - return false, true - } else { - return true, true - } - } -} - -type structFieldEncoder struct { - field reflect2.StructField - fieldEncoder ValEncoder - omitempty bool -} - -func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - fieldPtr := encoder.field.UnsafeGet(ptr) - encoder.fieldEncoder.Encode(fieldPtr, stream) - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) - } -} - -func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { - fieldPtr := encoder.field.UnsafeGet(ptr) - return encoder.fieldEncoder.IsEmpty(fieldPtr) -} - -func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { - isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) - if !converted { - return false - } - fieldPtr := encoder.field.UnsafeGet(ptr) - return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) -} - -type IsEmbeddedPtrNil interface { - IsEmbeddedPtrNil(ptr unsafe.Pointer) bool -} - -type structEncoder struct { - typ reflect2.Type - fields []structFieldTo -} - -type structFieldTo struct { - encoder *structFieldEncoder - toName string -} - -func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteObjectStart() - isNotFirst := false - for _, field := range encoder.fields { - if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { - continue - } - if field.encoder.IsEmbeddedPtrNil(ptr) { - continue - } - if isNotFirst { - stream.WriteMore() - } - stream.WriteObjectField(field.toName) - field.encoder.Encode(ptr, stream) - isNotFirst = true - } - stream.WriteObjectEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) - } -} - -func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type emptyStructEncoder struct { -} - -func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyObject() -} - -func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type stringModeNumberEncoder struct { - elemEncoder ValEncoder -} - -func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.elemEncoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} - -type stringModeStringEncoder struct { - elemEncoder ValEncoder - cfg *frozenConfig -} - -func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - tempStream := encoder.cfg.BorrowStream(nil) - defer encoder.cfg.ReturnStream(tempStream) - encoder.elemEncoder.Encode(ptr, tempStream) - stream.WriteString(string(tempStream.Buffer())) -} - -func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.elemEncoder.IsEmpty(ptr) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream.go deleted file mode 100644 index 17662fd..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream.go +++ /dev/null @@ -1,211 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// stream is a io.Writer like object, with JSON specific write functions. -// Error is not returned as return value, but stored as Error member on this stream instance. 
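Aside (not part of the patch): the stringMode encoders and decoders above wrap values in an extra layer of quotes, which is the behavior of the standard `json:",string"` tag option. A small standard-library example of the same effect; the Order type and field names are made up for illustration.

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type Order struct {
		ID    int64   `json:"id,string"`    // emitted and parsed as "123" rather than 123
		Price float64 `json:"price,string"` // same quoting applied to the float
	}

	func main() {
		out, _ := json.Marshal(Order{ID: 123, Price: 9.5})
		fmt.Println(string(out)) // {"id":"123","price":"9.5"}

		var o Order
		_ = json.Unmarshal([]byte(`{"id":"456","price":"1.25"}`), &o)
		fmt.Println(o.ID, o.Price) // 456 1.25
	}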
-type Stream struct { - cfg *frozenConfig - out io.Writer - buf []byte - Error error - indention int - Attachment interface{} // open for customized encoder -} - -// NewStream create new stream instance. -// cfg can be jsoniter.ConfigDefault. -// out can be nil if write to internal buffer. -// bufSize is the initial size for the internal buffer in bytes. -func NewStream(cfg API, out io.Writer, bufSize int) *Stream { - return &Stream{ - cfg: cfg.(*frozenConfig), - out: out, - buf: make([]byte, 0, bufSize), - Error: nil, - indention: 0, - } -} - -// Pool returns a pool can provide more stream with same configuration -func (stream *Stream) Pool() StreamPool { - return stream.cfg -} - -// Reset reuse this stream instance by assign a new writer -func (stream *Stream) Reset(out io.Writer) { - stream.out = out - stream.buf = stream.buf[:0] -} - -// Available returns how many bytes are unused in the buffer. -func (stream *Stream) Available() int { - return cap(stream.buf) - len(stream.buf) -} - -// Buffered returns the number of bytes that have been written into the current buffer. -func (stream *Stream) Buffered() int { - return len(stream.buf) -} - -// Buffer if writer is nil, use this method to take the result -func (stream *Stream) Buffer() []byte { - return stream.buf -} - -// SetBuffer allows to append to the internal buffer directly -func (stream *Stream) SetBuffer(buf []byte) { - stream.buf = buf -} - -// Write writes the contents of p into the buffer. -// It returns the number of bytes written. -// If nn < len(p), it also returns an error explaining -// why the write is short. -func (stream *Stream) Write(p []byte) (nn int, err error) { - stream.buf = append(stream.buf, p...) - if stream.out != nil { - nn, err = stream.out.Write(stream.buf) - stream.buf = stream.buf[nn:] - return - } - return len(p), nil -} - -// WriteByte writes a single byte. -func (stream *Stream) writeByte(c byte) { - stream.buf = append(stream.buf, c) -} - -func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { - stream.buf = append(stream.buf, c1, c2) -} - -func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { - stream.buf = append(stream.buf, c1, c2, c3) -} - -func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { - stream.buf = append(stream.buf, c1, c2, c3, c4) -} - -func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { - stream.buf = append(stream.buf, c1, c2, c3, c4, c5) -} - -// Flush writes any buffered data to the underlying io.Writer. -func (stream *Stream) Flush() error { - if stream.out == nil { - return nil - } - if stream.Error != nil { - return stream.Error - } - n, err := stream.out.Write(stream.buf) - if err != nil { - if stream.Error == nil { - stream.Error = err - } - return err - } - stream.buf = stream.buf[n:] - return nil -} - -// WriteRaw write string out without quotes, just like []byte -func (stream *Stream) WriteRaw(s string) { - stream.buf = append(stream.buf, s...) 
-} - -// WriteNil write null to stream -func (stream *Stream) WriteNil() { - stream.writeFourBytes('n', 'u', 'l', 'l') -} - -// WriteTrue write true to stream -func (stream *Stream) WriteTrue() { - stream.writeFourBytes('t', 'r', 'u', 'e') -} - -// WriteFalse write false to stream -func (stream *Stream) WriteFalse() { - stream.writeFiveBytes('f', 'a', 'l', 's', 'e') -} - -// WriteBool write true or false into stream -func (stream *Stream) WriteBool(val bool) { - if val { - stream.WriteTrue() - } else { - stream.WriteFalse() - } -} - -// WriteObjectStart write { with possible indention -func (stream *Stream) WriteObjectStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('{') - stream.writeIndention(0) -} - -// WriteObjectField write "field": with possible indention -func (stream *Stream) WriteObjectField(field string) { - stream.WriteString(field) - if stream.indention > 0 { - stream.writeTwoBytes(':', ' ') - } else { - stream.writeByte(':') - } -} - -// WriteObjectEnd write } with possible indention -func (stream *Stream) WriteObjectEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte('}') -} - -// WriteEmptyObject write {} -func (stream *Stream) WriteEmptyObject() { - stream.writeByte('{') - stream.writeByte('}') -} - -// WriteMore write , with possible indention -func (stream *Stream) WriteMore() { - stream.writeByte(',') - stream.writeIndention(0) - stream.Flush() -} - -// WriteArrayStart write [ with possible indention -func (stream *Stream) WriteArrayStart() { - stream.indention += stream.cfg.indentionStep - stream.writeByte('[') - stream.writeIndention(0) -} - -// WriteEmptyArray write [] -func (stream *Stream) WriteEmptyArray() { - stream.writeTwoBytes('[', ']') -} - -// WriteArrayEnd write ] with possible indention -func (stream *Stream) WriteArrayEnd() { - stream.writeIndention(stream.cfg.indentionStep) - stream.indention -= stream.cfg.indentionStep - stream.writeByte(']') -} - -func (stream *Stream) writeIndention(delta int) { - if stream.indention == 0 { - return - } - stream.writeByte('\n') - toWrite := stream.indention - delta - for i := 0; i < toWrite; i++ { - stream.buf = append(stream.buf, ' ') - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_float.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_float.go deleted file mode 100644 index f318d2c..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_float.go +++ /dev/null @@ -1,94 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var pow10 []uint64 - -func init() { - pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} -} - -// WriteFloat32 write float32 to stream -func (stream *Stream) WriteFloat32(val float32) { - abs := math.Abs(float64(val)) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
- if abs != 0 { - if float32(abs) < 1e-6 || float32(abs) >= 1e21 { - fmt = 'e' - } - } - stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) -} - -// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat32Lossy(val float32) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat32(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(float64(val)*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[len(stream.buf)-1] == '0' { - stream.buf = stream.buf[:len(stream.buf)-1] - } -} - -// WriteFloat64 write float64 to stream -func (stream *Stream) WriteFloat64(val float64) { - abs := math.Abs(val) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } - } - stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) -} - -// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster -func (stream *Stream) WriteFloat64Lossy(val float64) { - if val < 0 { - stream.writeByte('-') - val = -val - } - if val > 0x4ffffff { - stream.WriteFloat64(val) - return - } - precision := 6 - exp := uint64(1000000) // 6 - lval := uint64(val*float64(exp) + 0.5) - stream.WriteUint64(lval / exp) - fval := lval % exp - if fval == 0 { - return - } - stream.writeByte('.') - for p := precision - 1; p > 0 && fval < pow10[p]; p-- { - stream.writeByte('0') - } - stream.WriteUint64(fval) - for stream.buf[len(stream.buf)-1] == '0' { - stream.buf = stream.buf[:len(stream.buf)-1] - } -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_int.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_int.go deleted file mode 100644 index d1059ee..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_int.go +++ /dev/null @@ -1,190 +0,0 @@ -package jsoniter - -var digits []uint32 - -func init() { - digits = make([]uint32, 1000) - for i := uint32(0); i < 1000; i++ { - digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' - if i < 10 { - digits[i] += 2 << 24 - } else if i < 100 { - digits[i] += 1 << 24 - } - } -} - -func writeFirstBuf(space []byte, v uint32) []byte { - start := v >> 24 - if start == 0 { - space = append(space, byte(v>>16), byte(v>>8)) - } else if start == 1 { - space = append(space, byte(v>>8)) - } - space = append(space, byte(v)) - return space -} - -func writeBuf(buf []byte, v uint32) []byte { - return append(buf, byte(v>>16), byte(v>>8), byte(v)) -} - -// WriteUint8 write uint8 to stream -func (stream *Stream) WriteUint8(val uint8) { - stream.buf = writeFirstBuf(stream.buf, digits[val]) -} - -// WriteInt8 write int8 to stream -func (stream *Stream) WriteInt8(nval int8) { - var val uint8 - if nval < 0 { - val = uint8(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint8(nval) - } - stream.buf = writeFirstBuf(stream.buf, digits[val]) -} - -// WriteUint16 write uint16 to stream -func (stream *Stream) WriteUint16(val uint16) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - stream.buf = 
writeFirstBuf(stream.buf, digits[q1]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return -} - -// WriteInt16 write int16 to stream -func (stream *Stream) WriteInt16(nval int16) { - var val uint16 - if nval < 0 { - val = uint16(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint16(nval) - } - stream.WriteUint16(val) -} - -// WriteUint32 write uint32 to stream -func (stream *Stream) WriteUint32(val uint32) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q1]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q2]) - } else { - r3 := q2 - q3*1000 - stream.buf = append(stream.buf, byte(q3+'0')) - stream.buf = writeBuf(stream.buf, digits[r3]) - } - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) -} - -// WriteInt32 write int32 to stream -func (stream *Stream) WriteInt32(nval int32) { - var val uint32 - if nval < 0 { - val = uint32(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint32(nval) - } - stream.WriteUint32(val) -} - -// WriteUint64 write uint64 to stream -func (stream *Stream) WriteUint64(val uint64) { - q1 := val / 1000 - if q1 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[val]) - return - } - r1 := val - q1*1000 - q2 := q1 / 1000 - if q2 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q1]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r2 := q1 - q2*1000 - q3 := q2 / 1000 - if q3 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q2]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r3 := q2 - q3*1000 - q4 := q3 / 1000 - if q4 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q3]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r4 := q3 - q4*1000 - q5 := q4 / 1000 - if q5 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q4]) - stream.buf = writeBuf(stream.buf, digits[r4]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) - return - } - r5 := q4 - q5*1000 - q6 := q5 / 1000 - if q6 == 0 { - stream.buf = writeFirstBuf(stream.buf, digits[q5]) - } else { - stream.buf = writeFirstBuf(stream.buf, digits[q6]) - r6 := q5 - q6*1000 - stream.buf = writeBuf(stream.buf, digits[r6]) - } - stream.buf = writeBuf(stream.buf, digits[r5]) - stream.buf = writeBuf(stream.buf, digits[r4]) - stream.buf = writeBuf(stream.buf, digits[r3]) - stream.buf = writeBuf(stream.buf, digits[r2]) - stream.buf = writeBuf(stream.buf, digits[r1]) -} - -// WriteInt64 write int64 to stream -func (stream *Stream) WriteInt64(nval int64) { - var val uint64 - if nval < 0 { - val = uint64(-nval) - stream.buf = append(stream.buf, '-') - } else { - val = uint64(nval) - } - stream.WriteUint64(val) -} - -// WriteInt write int to stream -func (stream *Stream) WriteInt(val int) { - stream.WriteInt64(int64(val)) -} - -// WriteUint write uint to stream -func (stream *Stream) WriteUint(val uint) { - stream.WriteUint64(uint64(val)) -} diff --git a/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_str.go b/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_str.go 
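Aside (not part of the patch): stream_int.go above formats integers three decimal digits at a time from a precomputed table, where the top byte of each table entry records how many leading digits to drop in the most significant group. A simplified, self-contained sketch of that idea follows; the loop-based appendUint and the helper names are assumptions for illustration, whereas the original unrolls the groups by hand for each width.

	package main

	import "fmt"

	// digits pre-encodes 0..999 as three ASCII digits packed into a uint32;
	// the top byte says how many leading digits to drop for the first group.
	var digits [1000]uint32

	func init() {
		for i := uint32(0); i < 1000; i++ {
			digits[i] = ((i/100 + '0') << 16) | ((i/10%10 + '0') << 8) | (i%10 + '0')
			if i < 10 {
				digits[i] += 2 << 24
			} else if i < 100 {
				digits[i] += 1 << 24
			}
		}
	}

	// writeFirstGroup appends the most significant group without leading zeros.
	func writeFirstGroup(buf []byte, v uint32) []byte {
		switch v >> 24 {
		case 0:
			buf = append(buf, byte(v>>16), byte(v>>8))
		case 1:
			buf = append(buf, byte(v>>8))
		}
		return append(buf, byte(v))
	}

	// writeGroup appends a full, zero-padded three-digit group.
	func writeGroup(buf []byte, v uint32) []byte {
		return append(buf, byte(v>>16), byte(v>>8), byte(v))
	}

	// appendUint peels off groups of three decimal digits and writes them
	// most significant first, mirroring WriteUint32/WriteUint64 above.
	func appendUint(buf []byte, val uint64) []byte {
		var groups []uint32
		for {
			groups = append(groups, uint32(val%1000))
			val /= 1000
			if val == 0 {
				break
			}
		}
		buf = writeFirstGroup(buf, digits[groups[len(groups)-1]])
		for i := len(groups) - 2; i >= 0; i-- {
			buf = writeGroup(buf, digits[groups[i]])
		}
		return buf
	}

	func main() {
		fmt.Println(string(appendUint(nil, 0)))       // 0
		fmt.Println(string(appendUint(nil, 42)))      // 42
		fmt.Println(string(appendUint(nil, 1234567))) // 1234567
	}

The table lookup avoids per-digit division and keeps all writes as plain appends into the stream buffer, which is why the deleted code prefers it over strconv-style formatting.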
deleted file mode 100644 index 54c2ba0..0000000 --- a/cmd/bpa-operator/vendor/github.com/json-iterator/go/stream_str.go +++ /dev/null @@ -1,372 +0,0 @@ -package jsoniter - -import ( - "unicode/utf8" -) - -// htmlSafeSet holds the value true if the ASCII character with the given -// array position can be safely represented inside a JSON string, embedded -// inside of HTML